diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 6bba848939..84a10e0005 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.46.0" + ".": "1.47.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index efe82b715c..aa160ed2c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,40 @@ # Changelog +## [1.47.0](https://github.com/googleapis/python-aiplatform/compare/v1.46.0...v1.47.0) (2024-04-06) + + +### Features + +* Add display experiment button for tuning in IPython environments ([9bb687c](https://github.com/googleapis/python-aiplatform/commit/9bb687c20b03ea7227908e09831fb1a13ac3a970)) +* Add Persistent Resource ID parameter to Custom Job from_local_script, run, and submit methods. ([f5be0b5](https://github.com/googleapis/python-aiplatform/commit/f5be0b5652b0366eb6e823409ba1cb134e4b7b7c)) +* Add Persistent Resource Id parameter to Custom Training Job run and submit methods. ([f428006](https://github.com/googleapis/python-aiplatform/commit/f428006507e9b053a2121089e89fc54aedd3550a)) +* Added GA support for PersistentResource management ([98a07dd](https://github.com/googleapis/python-aiplatform/commit/98a07dd614063cb6a4c55c9024893874d3c95a1f)) +* Added GA support for running Custom and Hp tuning jobs on Persistent Resources ([35ecbac](https://github.com/googleapis/python-aiplatform/commit/35ecbac53df299b681e835648a9884a091f5d4d8)) +* Added the `Experiment.dashboard_url` property ([c8eec21](https://github.com/googleapis/python-aiplatform/commit/c8eec21d6f6e6f016669a18e19cebd9de1f0a7f9)) +* GenAI - Added support for `SafetySetting.method` (probability or severity) ([317ab8f](https://github.com/googleapis/python-aiplatform/commit/317ab8f7499b345c5a73365b95e9ba91c1adfecf)) +* GenAI - Added support for supervised fine-tuning ([036d2d0](https://github.com/googleapis/python-aiplatform/commit/036d2d0306e5190c972d2c4e5dd34257ea8fad6d)) +* GenAI - Added support for system instructions 
([4990eb6](https://github.com/googleapis/python-aiplatform/commit/4990eb6ade736c85c08455ca0ef9f7c9515662fd)) +* GenAI - Forced function calling feature ([806ef9f](https://github.com/googleapis/python-aiplatform/commit/806ef9fe860c51ee99481cb7f209723a22a1d369)) +* Initial template for Langchain on Vertex. ([0752a29](https://github.com/googleapis/python-aiplatform/commit/0752a29ec69280373519406be8528682f1c547ec)) +* LLM - Add RLHF-tuning support for `text-bison@002` ([1f27c3e](https://github.com/googleapis/python-aiplatform/commit/1f27c3eb5bb95b04f5e1708d631309c928df932b)) +* Vertex AI Extension SDK Public Preview ([137b5e1](https://github.com/googleapis/python-aiplatform/commit/137b5e11ab40abfd4f53c61544989d99770dabeb)) +* Vertex AI Reasoning Engine SDK Public Preview ([6aaa5d0](https://github.com/googleapis/python-aiplatform/commit/6aaa5d01c7e675bb1a553fd6780b035e3513d58e)) +* Vertex Rapid Evaluation SDK and Prompt Template for Vertex Prompt Management Public Preview ([8c6ddf5](https://github.com/googleapis/python-aiplatform/commit/8c6ddf54adf91e2fbf00034fef413ccfde3769d6)) + + +### Bug Fixes + +* GenAI - Fixed response validation error during streaming ([c881998](https://github.com/googleapis/python-aiplatform/commit/c881998c7fb54289efc5ae6f5431b631b3d6c11c)) +* GenAI - Fixed the `GenerativeModel`'s handling of tuned models from different region ([bf33fb3](https://github.com/googleapis/python-aiplatform/commit/bf33fb3e0053898cf8ba919180ee246ea5ad1cdb)) +* GenAI - Fixed the TuningJob dashboard URL ([5367fbb](https://github.com/googleapis/python-aiplatform/commit/5367fbb1125debd16357e4815c704105209fca2a)) +* Reinstate persistent resource preview class and tests ([765d60d](https://github.com/googleapis/python-aiplatform/commit/765d60da57b3e3c947b121667f0a2a2cb93b88f3)) + + +### Documentation + +* Add run custom job on persistent resource sample. 
([53fc845](https://github.com/googleapis/python-aiplatform/commit/53fc8455145c9fb7953a6e7dd6e85aab01055ad2)) +* Add run custom job on persistent resource sample. ([31100c6](https://github.com/googleapis/python-aiplatform/commit/31100c6dd6d7d4b0b588ed7f008e7661835b19d2)) + ## [1.46.0](https://github.com/googleapis/python-aiplatform/compare/v1.45.0...v1.46.0) (2024-03-30) diff --git a/docs/aiplatform_v1/gen_ai_tuning_service.rst b/docs/aiplatform_v1/gen_ai_tuning_service.rst new file mode 100644 index 0000000000..807d76649a --- /dev/null +++ b/docs/aiplatform_v1/gen_ai_tuning_service.rst @@ -0,0 +1,10 @@ +GenAiTuningService +------------------------------------ + +.. automodule:: google.cloud.aiplatform_v1.services.gen_ai_tuning_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.gen_ai_tuning_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1/notebook_service.rst b/docs/aiplatform_v1/notebook_service.rst new file mode 100644 index 0000000000..de8f96cd4c --- /dev/null +++ b/docs/aiplatform_v1/notebook_service.rst @@ -0,0 +1,10 @@ +NotebookService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.notebook_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1.services.notebook_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1/persistent_resource_service.rst b/docs/aiplatform_v1/persistent_resource_service.rst new file mode 100644 index 0000000000..173d53d1ab --- /dev/null +++ b/docs/aiplatform_v1/persistent_resource_service.rst @@ -0,0 +1,10 @@ +PersistentResourceService +------------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1.services.persistent_resource_service + :members: + :inherited-members: + +.. 
automodule:: google.cloud.aiplatform_v1.services.persistent_resource_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1/services_.rst b/docs/aiplatform_v1/services_.rst index 0f0396abb6..a66d73dac9 100644 --- a/docs/aiplatform_v1/services_.rst +++ b/docs/aiplatform_v1/services_.rst @@ -11,6 +11,7 @@ Services for Google Cloud Aiplatform v1 API feature_registry_service featurestore_online_serving_service featurestore_service + gen_ai_tuning_service index_endpoint_service index_service job_service @@ -20,6 +21,8 @@ Services for Google Cloud Aiplatform v1 API migration_service model_garden_service model_service + notebook_service + persistent_resource_service pipeline_service prediction_service schedule_service diff --git a/docs/aiplatform_v1beta1/evaluation_service.rst b/docs/aiplatform_v1beta1/evaluation_service.rst new file mode 100644 index 0000000000..c7454d934e --- /dev/null +++ b/docs/aiplatform_v1beta1/evaluation_service.rst @@ -0,0 +1,6 @@ +EvaluationService +----------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.evaluation_service + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/extension_execution_service.rst b/docs/aiplatform_v1beta1/extension_execution_service.rst new file mode 100644 index 0000000000..778d56e4fc --- /dev/null +++ b/docs/aiplatform_v1beta1/extension_execution_service.rst @@ -0,0 +1,6 @@ +ExtensionExecutionService +------------------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.extension_execution_service + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/extension_registry_service.rst b/docs/aiplatform_v1beta1/extension_registry_service.rst new file mode 100644 index 0000000000..4848e669ab --- /dev/null +++ b/docs/aiplatform_v1beta1/extension_registry_service.rst @@ -0,0 +1,10 @@ +ExtensionRegistryService +------------------------------------------ + +.. 
automodule:: google.cloud.aiplatform_v1beta1.services.extension_registry_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.extension_registry_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/notebook_service.rst b/docs/aiplatform_v1beta1/notebook_service.rst new file mode 100644 index 0000000000..ec0b259e25 --- /dev/null +++ b/docs/aiplatform_v1beta1/notebook_service.rst @@ -0,0 +1,10 @@ +NotebookService +--------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.notebook_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.notebook_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/services_.rst b/docs/aiplatform_v1beta1/services_.rst index 7163108b14..bad04de935 100644 --- a/docs/aiplatform_v1beta1/services_.rst +++ b/docs/aiplatform_v1beta1/services_.rst @@ -6,6 +6,9 @@ Services for Google Cloud Aiplatform v1beta1 API dataset_service deployment_resource_pool_service endpoint_service + evaluation_service + extension_execution_service + extension_registry_service feature_online_store_admin_service feature_online_store_service feature_registry_service @@ -20,6 +23,7 @@ Services for Google Cloud Aiplatform v1beta1 API migration_service model_garden_service model_service + notebook_service persistent_resource_service pipeline_service prediction_service @@ -28,4 +32,6 @@ Services for Google Cloud Aiplatform v1beta1 API schedule_service specialist_pool_service tensorboard_service + vertex_rag_data_service + vertex_rag_service vizier_service diff --git a/docs/aiplatform_v1beta1/vertex_rag_data_service.rst b/docs/aiplatform_v1beta1/vertex_rag_data_service.rst new file mode 100644 index 0000000000..0c974c2931 --- /dev/null +++ b/docs/aiplatform_v1beta1/vertex_rag_data_service.rst @@ -0,0 +1,10 @@ +VertexRagDataService +-------------------------------------- + +.. 
automodule:: google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service + :members: + :inherited-members: + +.. automodule:: google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.pagers + :members: + :inherited-members: diff --git a/docs/aiplatform_v1beta1/vertex_rag_service.rst b/docs/aiplatform_v1beta1/vertex_rag_service.rst new file mode 100644 index 0000000000..0128347a5b --- /dev/null +++ b/docs/aiplatform_v1beta1/vertex_rag_service.rst @@ -0,0 +1,6 @@ +VertexRagService +---------------------------------- + +.. automodule:: google.cloud.aiplatform_v1beta1.services.vertex_rag_service + :members: + :inherited-members: diff --git a/google/cloud/aiplatform/base.py b/google/cloud/aiplatform/base.py index cb4c8ecdf4..8f595d9d1a 100644 --- a/google/cloud/aiplatform/base.py +++ b/google/cloud/aiplatform/base.py @@ -950,36 +950,7 @@ def wrapper(*args, **kwargs): return optional_run_in_thread -class VertexAiResourceNounWithFutureManager(VertexAiResourceNoun, FutureManager): - """Allows optional asynchronous calls to this Vertex AI Resource - Nouns.""" - - def __init__( - self, - project: Optional[str] = None, - location: Optional[str] = None, - credentials: Optional[auth_credentials.Credentials] = None, - resource_name: Optional[str] = None, - ): - """Initializes class with project, location, and api_client. - - Args: - project (str): Optional. Project of the resource noun. - location (str): Optional. The location of the resource noun. - credentials(google.auth.credentials.Credentials): - Optional. custom credentials to use when accessing interacting with - resource noun. - resource_name(str): A fully-qualified resource name or ID. 
- """ - VertexAiResourceNoun.__init__( - self, - project=project, - location=location, - credentials=credentials, - resource_name=resource_name, - ) - FutureManager.__init__(self) - +class _VertexAiResourceNounPlus(VertexAiResourceNoun): @classmethod def _empty_constructor( cls, @@ -987,12 +958,9 @@ def _empty_constructor( location: Optional[str] = None, credentials: Optional[auth_credentials.Credentials] = None, resource_name: Optional[str] = None, - ) -> "VertexAiResourceNounWithFutureManager": + ) -> "_VertexAiResourceNounPlus": """Initializes with all attributes set to None. - The attributes should be populated after a future is complete. This allows - scheduling of additional API calls before the resource is created. - Args: project (str): Optional. Project of the resource noun. location (str): Optional. The location of the resource noun. @@ -1011,39 +979,9 @@ def _empty_constructor( credentials=credentials, resource_name=resource_name, ) - FutureManager.__init__(self) self._gca_resource = None return self - def _sync_object_with_future_result( - self, result: "VertexAiResourceNounWithFutureManager" - ): - """Populates attributes from a Future result to this object. - - Args: - result: VertexAiResourceNounWithFutureManager - Required. Result of future with same type as this object. - """ - sync_attributes = [ - "project", - "location", - "api_client", - "_gca_resource", - "credentials", - ] - optional_sync_attributes = [ - "_authorized_session", - "_raw_predict_request_url", - ] - - for attribute in sync_attributes: - setattr(self, attribute, getattr(result, attribute)) - - for attribute in optional_sync_attributes: - value = getattr(result, attribute, None) - if value: - setattr(self, attribute, value) - @classmethod def _construct_sdk_resource_from_gapic( cls, @@ -1258,6 +1196,111 @@ def _list_with_local_order( return li + def _delete(self) -> None: + """Deletes this Vertex AI resource. 
WARNING: This deletion is permanent.""" + _LOGGER.log_action_start_against_resource("Deleting", "", self) + lro = getattr(self.api_client, self._delete_method)(name=self.resource_name) + _LOGGER.log_action_started_against_resource_with_lro( + "Delete", "", self.__class__, lro + ) + lro.result() + _LOGGER.log_action_completed_against_resource("deleted.", "", self) + + +class VertexAiResourceNounWithFutureManager(_VertexAiResourceNounPlus, FutureManager): + """Allows optional asynchronous calls to this Vertex AI Resource + Nouns.""" + + def __init__( + self, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + resource_name: Optional[str] = None, + ): + """Initializes class with project, location, and api_client. + + Args: + project (str): Optional. Project of the resource noun. + location (str): Optional. The location of the resource noun. + credentials(google.auth.credentials.Credentials): + Optional. custom credentials to use when accessing interacting with + resource noun. + resource_name(str): A fully-qualified resource name or ID. + """ + _VertexAiResourceNounPlus.__init__( + self, + project=project, + location=location, + credentials=credentials, + resource_name=resource_name, + ) + FutureManager.__init__(self) + + @classmethod + def _empty_constructor( + cls, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + resource_name: Optional[str] = None, + ) -> "VertexAiResourceNounWithFutureManager": + """Initializes with all attributes set to None. + + The attributes should be populated after a future is complete. This allows + scheduling of additional API calls before the resource is created. + + Args: + project (str): Optional. Project of the resource noun. + location (str): Optional. The location of the resource noun. + credentials(google.auth.credentials.Credentials): + Optional. 
custom credentials to use when accessing interacting with + resource noun. + resource_name(str): A fully-qualified resource name or ID. + Returns: + An instance of this class with attributes set to None. + """ + self = cls.__new__(cls) + VertexAiResourceNoun.__init__( + self, + project=project, + location=location, + credentials=credentials, + resource_name=resource_name, + ) + FutureManager.__init__(self) + self._gca_resource = None + return self + + def _sync_object_with_future_result( + self, result: "VertexAiResourceNounWithFutureManager" + ): + """Populates attributes from a Future result to this object. + + Args: + result: VertexAiResourceNounWithFutureManager + Required. Result of future with same type as this object. + """ + sync_attributes = [ + "project", + "location", + "api_client", + "_gca_resource", + "credentials", + ] + optional_sync_attributes = [ + "_authorized_session", + "_raw_predict_request_url", + ] + + for attribute in sync_attributes: + setattr(self, attribute, getattr(result, attribute)) + + for attribute in optional_sync_attributes: + value = getattr(result, attribute, None) + if value: + setattr(self, attribute, value) + @classmethod def list( cls, @@ -1322,13 +1365,7 @@ def delete(self, sync: bool = True) -> None: will be executed in concurrent Future and any downstream object will be immediately returned and synced when the Future has completed. 
""" - _LOGGER.log_action_start_against_resource("Deleting", "", self) - lro = getattr(self.api_client, self._delete_method)(name=self.resource_name) - _LOGGER.log_action_started_against_resource_with_lro( - "Delete", "", self.__class__, lro - ) - lro.result() - _LOGGER.log_action_completed_against_resource("deleted.", "", self) + self._delete() def __repr__(self) -> str: if self._gca_resource and self._resource_is_available: diff --git a/google/cloud/aiplatform/compat/services/__init__.py b/google/cloud/aiplatform/compat/services/__init__.py index ce7665be9c..e16b3a7953 100644 --- a/google/cloud/aiplatform/compat/services/__init__.py +++ b/google/cloud/aiplatform/compat/services/__init__.py @@ -24,6 +24,12 @@ from google.cloud.aiplatform_v1beta1.services.endpoint_service import ( client as endpoint_service_client_v1beta1, ) +from google.cloud.aiplatform_v1beta1.services.extension_execution_service import ( + client as extension_execution_service_client_v1beta1, +) +from google.cloud.aiplatform_v1beta1.services.extension_registry_service import ( + client as extension_registry_service_client_v1beta1, +) from google.cloud.aiplatform_v1beta1.services.feature_online_store_service import ( client as feature_online_store_service_client_v1beta1, ) @@ -124,6 +130,9 @@ from google.cloud.aiplatform_v1.services.model_service import ( client as model_service_client_v1, ) +from google.cloud.aiplatform_v1.services.persistent_resource_service import ( + client as persistent_resource_service_client_v1, +) from google.cloud.aiplatform_v1.services.pipeline_service import ( client as pipeline_service_client_v1, ) @@ -160,6 +169,7 @@ metadata_service_client_v1, model_garden_service_client_v1, model_service_client_v1, + persistent_resource_service_client_v1, pipeline_service_client_v1, prediction_service_client_v1, prediction_service_async_client_v1, diff --git a/google/cloud/aiplatform/compat/types/__init__.py b/google/cloud/aiplatform/compat/types/__init__.py index 
d36312b025..81c5dd9b4c 100644 --- a/google/cloud/aiplatform/compat/types/__init__.py +++ b/google/cloud/aiplatform/compat/types/__init__.py @@ -156,6 +156,8 @@ model_service as model_service_v1, model_monitoring as model_monitoring_v1, operation as operation_v1, + persistent_resource as persistent_resource_v1, + persistent_resource_service as persistent_resource_service_v1, pipeline_failure_policy as pipeline_failure_policy_v1, pipeline_job as pipeline_job_v1, pipeline_service as pipeline_service_v1, @@ -230,7 +232,8 @@ model_service_v1, model_monitoring_v1, operation_v1, - persistent_resource_v1beta1, + persistent_resource_v1, + persistent_resource_service_v1, pipeline_failure_policy_v1, pipeline_job_v1, pipeline_service_v1, @@ -306,6 +309,8 @@ model_service_v1beta1, model_monitoring_v1beta1, operation_v1beta1, + persistent_resource_v1beta1, + persistent_resource_service_v1beta1, pipeline_failure_policy_v1beta1, pipeline_job_v1beta1, pipeline_service_v1beta1, diff --git a/google/cloud/aiplatform/constants/prediction.py b/google/cloud/aiplatform/constants/prediction.py index bee71d0cad..5f7e8bcd3f 100644 --- a/google/cloud/aiplatform/constants/prediction.py +++ b/google/cloud/aiplatform/constants/prediction.py @@ -34,9 +34,6 @@ XGBOOST = "xgboost" XGBOOST_CONTAINER_URIS = [ - "us-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.2-0:latest", - "europe-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.2-0:latest", - "asia-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.2-0:latest", "us-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.1-7:latest", "europe-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.1-7:latest", "asia-docker.pkg.dev/vertex-ai/prediction/xgboost-cpu.1-7:latest", diff --git a/google/cloud/aiplatform/gapic_version.py b/google/cloud/aiplatform/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/gapic_version.py +++ b/google/cloud/aiplatform/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language 
governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/jobs.py b/google/cloud/aiplatform/jobs.py index 188b671b5c..9231db4e57 100644 --- a/google/cloud/aiplatform/jobs.py +++ b/google/cloud/aiplatform/jobs.py @@ -1731,6 +1731,7 @@ def __init__( labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, + persistent_resource_id: Optional[str] = None, ): """Constructs a Custom Job with Worker Pool Specs. @@ -1802,6 +1803,13 @@ def __init__( staging_bucket (str): Optional. Bucket for produced custom job artifacts. Overrides staging_bucket set in aiplatform.init. + persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network and CMEK configs on + the job should be consistent with those on the PersistentResource, + otherwise, the job will be rejected. Raises: RuntimeError: If staging bucket was not set using aiplatform.init @@ -1836,6 +1844,7 @@ def __init__( base_output_directory=gca_io_compat.GcsDestination( output_uri_prefix=base_output_dir ), + persistent_resource_id=persistent_resource_id, ), labels=labels, encryption_spec=initializer.global_config.get_encryption_spec( @@ -1914,6 +1923,7 @@ def from_local_script( labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, staging_bucket: Optional[str] = None, + persistent_resource_id: Optional[str] = None, ) -> "CustomJob": """Configures a custom job from a local script. @@ -2017,6 +2027,13 @@ def from_local_script( staging_bucket (str): Optional. Bucket for produced custom job artifacts. Overrides staging_bucket set in aiplatform.init. 
+ persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. Raises: RuntimeError: If staging bucket was not set using aiplatform.init @@ -2162,6 +2179,7 @@ def from_local_script( labels=labels, encryption_spec_key_name=encryption_spec_key_name, staging_bucket=staging_bucket, + persistent_resource_id=persistent_resource_id, ) if enable_autolog: @@ -2182,6 +2200,7 @@ def run( sync: bool = True, create_request_timeout: Optional[float] = None, disable_retries: bool = False, + persistent_resource_id: Optional[str] = None, ) -> None: """Run this configured CustomJob. @@ -2243,6 +2262,13 @@ def run( Indicates if the job should retry for internal errors after the job starts running. If True, overrides `restart_job_on_worker_restart` to False. + persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. 
""" network = network or initializer.global_config.network service_account = service_account or initializer.global_config.service_account @@ -2259,6 +2285,7 @@ def run( sync=sync, create_request_timeout=create_request_timeout, disable_retries=disable_retries, + persistent_resource_id=persistent_resource_id, ) @base.optional_sync() @@ -2275,6 +2302,7 @@ def _run( sync: bool = True, create_request_timeout: Optional[float] = None, disable_retries: bool = False, + persistent_resource_id: Optional[str] = None, ) -> None: """Helper method to ensure network synchronization and to run the configured CustomJob. @@ -2334,6 +2362,13 @@ def _run( Indicates if the job should retry for internal errors after the job starts running. If True, overrides `restart_job_on_worker_restart` to False. + persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. """ self.submit( service_account=service_account, @@ -2346,6 +2381,7 @@ def _run( tensorboard=tensorboard, create_request_timeout=create_request_timeout, disable_retries=disable_retries, + persistent_resource_id=persistent_resource_id, ) self._block_until_complete() @@ -2363,6 +2399,7 @@ def submit( tensorboard: Optional[str] = None, create_request_timeout: Optional[float] = None, disable_retries: bool = False, + persistent_resource_id: Optional[str] = None, ) -> None: """Submit the configured CustomJob. @@ -2419,6 +2456,13 @@ def submit( Indicates if the job should retry for internal errors after the job starts running. If True, overrides `restart_job_on_worker_restart` to False. + persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. 
If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. Raises: ValueError: @@ -2455,6 +2499,9 @@ def submit( if tensorboard: self._gca_resource.job_spec.tensorboard = tensorboard + if persistent_resource_id: + self._gca_resource.job_spec.persistent_resource_id = persistent_resource_id + # TODO(b/275105711) Update implementation after experiment/run in the proto if experiment: # short-term solution to set experiment/experimentRun in SDK @@ -2669,7 +2716,8 @@ def __init__( of any UTF-8 characters. custom_job (aiplatform.CustomJob): Required. Configured CustomJob. The worker pool spec from this custom job - applies to the CustomJobs created in all the trials. + applies to the CustomJobs created in all the trials. A persistent_resource_id can be + specified on the custom job to be used when running this Hyperparameter Tuning job. metric_spec: Dict[str, str] Required. Dictionary representing metrics to optimize. 
The dictionary key is the metric_id, which is reported by your training job, and the dictionary value is the diff --git a/google/cloud/aiplatform/metadata/experiment_resources.py b/google/cloud/aiplatform/metadata/experiment_resources.py index f173f527e7..ab38c7dff1 100644 --- a/google/cloud/aiplatform/metadata/experiment_resources.py +++ b/google/cloud/aiplatform/metadata/experiment_resources.py @@ -622,6 +622,12 @@ def _log_experiment_loggable(self, experiment_loggable: "_ExperimentLoggable"): context = experiment_loggable._get_context() self._metadata_context.add_context_children([context]) + @property + def dashboard_url(self) -> Optional[str]: + """Cloud console URL for this resource.""" + url = f"https://console.cloud.google.com/vertex-ai/experiments/locations/{self._metadata_context.location}/experiments/{self._metadata_context.name}?project={self._metadata_context.project}" + return url + class _SetLoggerLevel: """Helper method to suppress logging.""" diff --git a/google/cloud/aiplatform/persistent_resource.py b/google/cloud/aiplatform/persistent_resource.py new file mode 100644 index 0000000000..0bd6dbe404 --- /dev/null +++ b/google/cloud/aiplatform/persistent_resource.py @@ -0,0 +1,422 @@ +# -*- coding: utf-8 -*- + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from typing import Dict, List, Optional, Union + +from google.api_core import operation +from google.auth import credentials as auth_credentials +from google.cloud.aiplatform import base +from google.cloud.aiplatform import initializer +from google.cloud.aiplatform import utils +from google.cloud.aiplatform.compat.services import ( + persistent_resource_service_client_v1 as persistent_resource_service_client_compat, +) +from google.cloud.aiplatform.compat.types import ( + encryption_spec as gca_encryption_spec_compat, + persistent_resource_v1 as gca_persistent_resource_compat, +) +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + +_LOGGER = base.Logger(__name__) + + +class PersistentResource(base.VertexAiResourceNounWithFutureManager): + """Managed PersistentResource feature for Vertex AI.""" + + client_class = utils.PersistentResourceClientWithOverride + _resource_noun = "persistentResource" + _getter_method = "get_persistent_resource" + _list_method = "list_persistent_resources" + _delete_method = "delete_persistent_resource" + _parse_resource_name_method = "parse_persistent_resource_path" + _format_resource_name_method = "persistent_resource_path" + + def __init__( + self, + persistent_resource_id: str, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + ): + """Retrieves the PersistentResource and instantiates its representation. + + Args: + persistent_resource_id (str): + Required. + project (str): + Project this PersistentResource is in. Overrides + project set in aiplatform.init. + location (str): + Location this PersistentResource is in. Overrides + location set in aiplatform.init. + credentials (auth_credentials.Credentials): + Custom credentials to use to manage this PersistentResource. + Overrides credentials set in aiplatform.init. 
+ """ + super().__init__( + project=project, + location=location, + credentials=credentials, + resource_name=persistent_resource_id, + ) + + self._gca_resource = self._get_gca_resource( + resource_name=persistent_resource_id + ) + + @property + def display_name(self) -> Optional[str]: + """The display name of the PersistentResource.""" + self._assert_gca_resource_is_available() + return getattr(self._gca_resource, "display_name", None) + + @property + def state(self) -> gca_persistent_resource_compat.PersistentResource.State: + """The state of the PersistentResource. + + Values: + STATE_UNSPECIFIED (0): + Not set. + PROVISIONING (1): + The PROVISIONING state indicates the + persistent resources is being created. + RUNNING (3): + The RUNNING state indicates the persistent + resources is healthy and fully usable. + STOPPING (4): + The STOPPING state indicates the persistent + resources is being deleted. + ERROR (5): + The ERROR state indicates the persistent resources may be + unusable. Details can be found in the ``error`` field. + """ + self._assert_gca_resource_is_available() + return getattr(self._gca_resource, "state", None) + + @property + def error(self) -> Optional[status_pb2.Status]: + """The error status of the PersistentResource. + + Only populated when the resource's state is ``STOPPING`` or ``ERROR``. 
+
+        """
+        self._assert_gca_resource_is_available()
+        return getattr(self._gca_resource, "error", None)
+
+    @property
+    def create_time(self) -> Optional[timestamp_pb2.Timestamp]:
+        """Time when the PersistentResource was created."""
+        self._assert_gca_resource_is_available()
+        return getattr(self._gca_resource, "create_time", None)
+
+    @property
+    def start_time(self) -> Optional[timestamp_pb2.Timestamp]:
+        """Time when the PersistentResource first entered the ``RUNNING`` state."""
+        self._assert_gca_resource_is_available()
+        return getattr(self._gca_resource, "start_time", None)
+
+    @property
+    def update_time(self) -> Optional[timestamp_pb2.Timestamp]:
+        """Time when the PersistentResource was most recently updated."""
+        self._assert_gca_resource_is_available()
+        return getattr(self._gca_resource, "update_time", None)
+
+    @property
+    def network(self) -> Optional[str]:
+        """The network peered with the PersistentResource.
+
+        The full name of the Compute Engine
+        `network </compute/docs/networks-and-firewalls#networks>`__ to be peered
+        with Vertex AI to host the persistent resources.
+
+        For example, ``projects/12345/global/networks/myVPC``.
+        `Format </compute/docs/reference/rest/v1/networks/insert>`__ is of the
+        form ``projects/{project}/global/networks/{network}``. Where {project}
+        is a project number, as in ``12345``, and {network} is a network name.
+
+        To specify this field, you must have already `configured VPC Network
+        Peering for Vertex
+        AI <https://cloud.google.com/vertex-ai/docs/general/vpc-peering>`__.
+
+        If this field is left unspecified, the resources aren't peered with any
+        network.
+ """ + self._assert_gca_resource_is_available() + return getattr(self._gca_resource, "network", None) + + @classmethod + @base.optional_sync() + def create( + cls, + persistent_resource_id: str, + resource_pools: Union[ + List[Dict], List[gca_persistent_resource_compat.ResourcePool] + ], + display_name: Optional[str] = None, + labels: Optional[Dict[str, str]] = None, + network: Optional[str] = None, + kms_key_name: Optional[str] = None, + service_account: Optional[str] = None, + reserved_ip_ranges: List[str] = None, + sync: Optional[bool] = True, # pylint: disable=unused-argument + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + ) -> "PersistentResource": + r"""Creates a PersistentResource. + + Args: + persistent_resource_id (str): + Required. The ID to use for the PersistentResource, + which become the final component of the + PersistentResource's resource name. + + The maximum length is 63 characters, and valid + characters are ``/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/``. + + This corresponds to the ``persistent_resource_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_pools (MutableSequence[google.cloud.aiplatform_v1.types.ResourcePool]): + Required. The list of resource pools to create for the + PersistentResource. + display_name (str): + Optional. The display name of the + PersistentResource. The name can be up to 128 + characters long and can consist of any UTF-8 + characters. + labels (MutableMapping[str, str]): + Optional. The labels with user-defined + metadata to organize PersistentResource. + + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + + See https://goo.gl/xmQnxf for more information + and examples of labels. + network (str): + Optional. 
The full name of the Compute Engine
+                `network </compute/docs/networks-and-firewalls#networks>`__
+                to be peered with Vertex AI to host the persistent resources.
+                For example, ``projects/12345/global/networks/myVPC``.
+                `Format </compute/docs/reference/rest/v1/networks/insert>`__
+                is of the form
+                ``projects/{project}/global/networks/{network}``. Where
+                {project} is a project number, as in ``12345``, and
+                {network} is a network name.
+
+                To specify this field, you must have already `configured VPC
+                Network Peering for Vertex
+                AI <https://cloud.google.com/vertex-ai/docs/general/vpc-peering>`__.
+
+                If this field is left unspecified, the resources aren't
+                peered with any network.
+            kms_key_name (str):
+                Optional. Customer-managed encryption key for the
+                PersistentResource. If set, this PersistentResource and all
+                sub-resources of this PersistentResource will be secured by
+                this key.
+            service_account (str):
+                Optional. Default service account that this
+                PersistentResource's workloads run as. The workloads
+                including
+
+                - Any runtime specified via ``ResourceRuntimeSpec`` on
+                  creation time, for example, Ray.
+                - Jobs submitted to PersistentResource, if no other service
+                  account specified in the job specs.
+
+                Only works when custom service account is enabled and users
+                have the ``iam.serviceAccounts.actAs`` permission on this
+                service account.
+            reserved_ip_ranges (MutableSequence[str]):
+                Optional. A list of names for the reserved IP ranges under
+                the VPC network that can be used for this persistent
+                resource.
+
+                If set, we will deploy the persistent resource within the
+                provided IP ranges. Otherwise, the persistent resource is
+                deployed to any IP ranges under the provided VPC network.
+
+                Example ['vertex-ai-ip-range'].
+            sync (bool):
+                Whether to execute this method synchronously. If False, this
+                method will be executed in concurrent Future and any downstream
+                object will be immediately returned and synced when the Future
+                has completed.
+            project (str):
+                Project to create this PersistentResource in. Overrides project
+                set in aiplatform.init.
+ location (str): + Location to create this PersistentResource in. Overrides + location set in aiplatform.init. + credentials (auth_credentials.Credentials): + Custom credentials to use to create this PersistentResource. + Overrides credentials set in aiplatform.init. + + Returns: + persistent_resource (PersistentResource): + The object representation of the newly created + PersistentResource. + """ + + if labels: + utils.validate_labels(labels) + + gca_persistent_resource = gca_persistent_resource_compat.PersistentResource( + name=persistent_resource_id, + display_name=display_name, + resource_pools=resource_pools, + labels=labels, + network=network, + reserved_ip_ranges=reserved_ip_ranges, + ) + + if kms_key_name: + gca_persistent_resource.encryption_spec = ( + gca_encryption_spec_compat.EncryptionSpec(kms_key_name=kms_key_name) + ) + + if service_account: + service_account_spec = gca_persistent_resource_compat.ServiceAccountSpec( + enable_custom_service_account=True, service_account=service_account + ) + gca_persistent_resource.resource_runtime_spec = ( + gca_persistent_resource_compat.ResourceRuntimeSpec( + service_account_spec=service_account_spec + ) + ) + + api_client = cls._instantiate_client(location, credentials) + create_lro = cls._create( + api_client=api_client, + parent=initializer.global_config.common_location_path( + project=project, location=location + ), + persistent_resource=gca_persistent_resource, + persistent_resource_id=persistent_resource_id, + ) + + _LOGGER.log_create_with_lro(cls, create_lro) + + create_lro.result(timeout=None) + persistent_resource_result = cls( + persistent_resource_id=persistent_resource_id, + project=project, + location=location, + credentials=credentials, + ) + + _LOGGER.log_create_complete( + cls, persistent_resource_result._gca_resource, "persistent resource" + ) + + return persistent_resource_result + + @classmethod + def _create( + cls, + api_client: ( + 
persistent_resource_service_client_compat.PersistentResourceServiceClient + ), + parent: str, + persistent_resource: gca_persistent_resource_compat.PersistentResource, + persistent_resource_id: str, + create_request_timeout: Optional[float] = None, + ) -> operation.Operation: + """Creates a PersistentResource directly calling the API client. + + Args: + api_client (PersistentResourceServiceClient): + An instance of PersistentResourceServiceClient with the correct + api_endpoint already set based on user's preferences. + parent (str): + Required. Also known as common location path, that usually contains the + project and location that the user provided to the upstream method. + IE "projects/my-project/locations/us-central1" + persistent_resource (gca_persistent_resource_compat.PersistentResource): + Required. The PersistentResource object to use for the create request. + persistent_resource_id (str): + Required. The ID to use for the PersistentResource, + which become the final component of the + PersistentResource's resource name. + + The maximum length is 63 characters, and valid + characters are ``/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/``. + + This corresponds to the ``persistent_resource_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + create_request_timeout (float): + Optional. The timeout for the create request in seconds. + + Returns: + operation (Operation): + The long-running operation returned by the Persistent Resource + create call. 
+ """ + return api_client.create_persistent_resource( + parent=parent, + persistent_resource_id=persistent_resource_id, + persistent_resource=persistent_resource, + timeout=create_request_timeout, + ) + + @classmethod + def list( + cls, + filter: Optional[str] = None, + order_by: Optional[str] = None, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + ) -> List["PersistentResource"]: + """Lists a Persistent Resources on the provided project and region. + + Args: + filter (str): + Optional. An expression for filtering the results of the request. + For field names both snake_case and camelCase are supported. + order_by (str): + Optional. A comma-separated list of fields to order by, sorted in + ascending order. Use "desc" after a field name for descending. + Supported fields: `display_name`, `create_time`, `update_time` + project (str): + Optional. Project to retrieve list from. If not set, project + set in aiplatform.init will be used. + location (str): + Optional. Location to retrieve list from. If not set, location + set in aiplatform.init will be used. + credentials (auth_credentials.Credentials): + Optional. Custom credentials to use to retrieve list. Overrides + credentials set in aiplatform.init. + + Returns: + List[PersistentResource] + A list of PersistentResource objects. + """ + return cls._list_with_local_order( + filter=filter, + order_by=order_by, + project=project, + location=location, + credentials=credentials, + ) diff --git a/google/cloud/aiplatform/preview/jobs.py b/google/cloud/aiplatform/preview/jobs.py index 2c81bb46fc..8104b7c4c8 100644 --- a/google/cloud/aiplatform/preview/jobs.py +++ b/google/cloud/aiplatform/preview/jobs.py @@ -70,7 +70,7 @@ class CustomJob(jobs.CustomJob): - """Vertex AI Custom Job.""" + """Deprecated. 
Vertex AI Custom Job (preview).""" def __init__( self, @@ -88,7 +88,9 @@ def __init__( staging_bucket: Optional[str] = None, persistent_resource_id: Optional[str] = None, ): - """Constructs a Custom Job with Worker Pool Specs. + """Deprecated. Please use the GA (non-preview) version of this class. + + Constructs a Custom Job with Worker Pool Specs. ``` Example usage: @@ -472,7 +474,7 @@ def submit( class HyperparameterTuningJob(jobs.HyperparameterTuningJob): - """Vertex AI Hyperparameter Tuning Job.""" + """Deprecated. Vertex AI Hyperparameter Tuning Job (preview).""" def __init__( self, @@ -492,7 +494,8 @@ def __init__( labels: Optional[Dict[str, str]] = None, encryption_spec_key_name: Optional[str] = None, ): - """ + """Deprecated. Please use the GA (non-preview) version of this class. + Configures a HyperparameterTuning Job. Example usage: diff --git a/google/cloud/aiplatform/preview/persistent_resource.py b/google/cloud/aiplatform/preview/persistent_resource.py index 0823af0db4..e06b08e90b 100644 --- a/google/cloud/aiplatform/preview/persistent_resource.py +++ b/google/cloud/aiplatform/preview/persistent_resource.py @@ -18,6 +18,7 @@ from typing import Dict, List, Optional, Union from google.api_core import operation +from google.api_core import retry from google.auth import credentials as auth_credentials from google.cloud.aiplatform import base from google.cloud.aiplatform import initializer @@ -25,20 +26,23 @@ from google.cloud.aiplatform.compat.services import ( persistent_resource_service_client_v1beta1 as persistent_resource_service_client_compat, ) -from google.cloud.aiplatform.compat.types import ( - persistent_resource_v1beta1 as gca_persistent_resource_compat, +from google.cloud.aiplatform_v1beta1.types import ( + encryption_spec as gca_encryption_spec_compat, ) from google.cloud.aiplatform_v1beta1.types import ( - encryption_spec as gca_encryption_spec, + persistent_resource as gca_persistent_resource_compat, ) + from google.protobuf import 
timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore + _LOGGER = base.Logger(__name__) +_DEFAULT_RETRY = retry.Retry() class PersistentResource(base.VertexAiResourceNounWithFutureManager): - """Managed PersistentResource feature for Vertex AI.""" + """Managed PersistentResource feature for Vertex AI (Preview).""" client_class = utils.PersistentResourceClientWithOverride _resource_noun = "persistentResource" @@ -194,7 +198,7 @@ def create( This corresponds to the ``persistent_resource_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - resource_pools (MutableSequence[google.cloud.aiplatform_v1beta1.types.ResourcePool]): + resource_pools (MutableSequence[google.cloud.aiplatform_v1.types.ResourcePool]): Required. The list of resource pools to create for the PersistentResource. display_name (str): @@ -294,7 +298,7 @@ def create( if kms_key_name: gca_persistent_resource.encryption_spec = ( - gca_encryption_spec.EncryptionSpec(kms_key_name=kms_key_name) + gca_encryption_spec_compat.EncryptionSpec(kms_key_name=kms_key_name) ) if service_account: @@ -307,7 +311,9 @@ def create( ) ) - api_client = cls._instantiate_client(location, credentials) + api_client = cls._instantiate_client(location, credentials).select_version( + "v1beta1" + ) create_lro = cls._create( api_client=api_client, parent=initializer.global_config.common_location_path( diff --git a/google/cloud/aiplatform/preview/vertex_ray/client_builder.py b/google/cloud/aiplatform/preview/vertex_ray/client_builder.py index bfe6fc4f9e..2ffd0c142f 100644 --- a/google/cloud/aiplatform/preview/vertex_ray/client_builder.py +++ b/google/cloud/aiplatform/preview/vertex_ray/client_builder.py @@ -129,16 +129,14 @@ def __init__(self, address: Optional[str]) -> None: ", but the requested cluster runtime has %s. Please " "ensure that the Ray versions match for client connectivity. 
You may " '"pip install --user --force-reinstall ray[default]==%s"' - " and restart runtime before cluster connection.", - local_ray_verion, - cluster.ray_version, - install_ray_version, + " and restart runtime before cluster connection." + % (local_ray_verion, cluster.ray_version, install_ray_version) ) else: logging.info( "[Ray on Vertex]: Local runtime has Ray version %s." - "Please ensure that the Ray versions match for client connectivity.", - local_ray_verion, + "Please ensure that the Ray versions match for client connectivity." + % local_ray_verion ) super().__init__(address) diff --git a/google/cloud/aiplatform/preview/vertex_ray/cluster_init.py b/google/cloud/aiplatform/preview/vertex_ray/cluster_init.py index 628e92c694..6f798c89f0 100644 --- a/google/cloud/aiplatform/preview/vertex_ray/cluster_init.py +++ b/google/cloud/aiplatform/preview/vertex_ray/cluster_init.py @@ -137,16 +137,14 @@ def create_ray_cluster( ", but the requested cluster runtime has %s. Please " "ensure that the Ray versions match for client connectivity. You may " '"pip install --user --force-reinstall ray[default]==%s"' - " and restart runtime before cluster connection.", - local_ray_verion, - ray_version, - install_ray_version, + " and restart runtime before cluster connection." + % (local_ray_verion, ray_version, install_ray_version) ) else: logging.info( "[Ray on Vertex]: Local runtime has Ray version %s." - "Please ensure that the Ray versions match for client connectivity.", - local_ray_verion, + "Please ensure that the Ray versions match for client connectivity." 
+ % local_ray_verion ) if cluster_name is None: diff --git a/google/cloud/aiplatform/preview/vertex_ray/predict/xgboost/register.py b/google/cloud/aiplatform/preview/vertex_ray/predict/xgboost/register.py index 4ce5e9b003..eddc38564a 100644 --- a/google/cloud/aiplatform/preview/vertex_ray/predict/xgboost/register.py +++ b/google/cloud/aiplatform/preview/vertex_ray/predict/xgboost/register.py @@ -77,7 +77,7 @@ def register_xgboost( Optional. The display name of the Model. The name can be up to 128 characters long and can be consist of any UTF-8 characters. xgboost_version (str): Optional. The version of the XGBoost serving container. - Supported versions: ["0.82", "0.90", "1.1", "1.2", "1.3", "1.4", "1.6", "1.7", "2.0"]. + Supported versions: ["0.82", "0.90", "1.1", "1.2", "1.3", "1.4", "1.6", "1.7"]. If the version is not specified, the latest version is used. **kwargs: Any kwargs will be passed to aiplatform.Model registration. diff --git a/google/cloud/aiplatform/preview/vertex_ray/util/_gapic_utils.py b/google/cloud/aiplatform/preview/vertex_ray/util/_gapic_utils.py index aabfbd1171..4bed9101c7 100644 --- a/google/cloud/aiplatform/preview/vertex_ray/util/_gapic_utils.py +++ b/google/cloud/aiplatform/preview/vertex_ray/util/_gapic_utils.py @@ -47,7 +47,7 @@ def create_persistent_resource_client(): return initializer.global_config.create_client( client_class=PersistentResourceClientWithOverride, appended_gapic_version="vertex_ray", - ) + ).select_version("v1beta1") def polling_delay(num_attempts: int, time_scale: float) -> datetime.timedelta: @@ -109,7 +109,7 @@ def get_persistent_resource( ) if response: if response.error.message: - logging.error("[Ray on Vertex AI]: %s", response.error.message) + logging.error("[Ray on Vertex AI]: %s" % response.error.message) raise RuntimeError("[Ray on Vertex AI]: Cluster returned an error.") print("[Ray on Vertex AI]: Cluster State =", response.state) @@ -155,8 +155,8 @@ def persistent_resource_to_cluster( if not 
persistent_resource.resource_runtime_spec.ray_spec: # skip PersistentResource without RaySpec logging.info( - "[Ray on Vertex AI]: Cluster %s does not have Ray installed.", - persistent_resource.name, + "[Ray on Vertex AI]: Cluster %s does not have Ray installed." + % persistent_resource.name, ) return resource_pools = persistent_resource.resource_pools @@ -178,8 +178,9 @@ def persistent_resource_to_cluster( if _PRIVATE_PREVIEW_IMAGE in head_image_uri: # If using outdated images logging.info( - "[Ray on Vertex AI]: The image of cluster %s is outdated. It is recommended to delete and recreate the cluster to obtain the latest image.", - persistent_resource.name, + "[Ray on Vertex AI]: The image of cluster %s is outdated." + " It is recommended to delete and recreate the cluster to obtain" + " the latest image." % persistent_resource.name ) return None else: diff --git a/google/cloud/aiplatform/preview/vertex_ray/util/_validation_utils.py b/google/cloud/aiplatform/preview/vertex_ray/util/_validation_utils.py index 718e933393..2ad04c5d85 100644 --- a/google/cloud/aiplatform/preview/vertex_ray/util/_validation_utils.py +++ b/google/cloud/aiplatform/preview/vertex_ray/util/_validation_utils.py @@ -83,11 +83,13 @@ def get_image_uri(ray_version, python_version, enable_cuda): """Image uri for a given ray version and python version.""" if ray_version not in SUPPORTED_RAY_VERSIONS: raise ValueError( - "[Ray on Vertex AI]: The supported Ray versions are %s (%s) and %s (%s).", - list(SUPPORTED_RAY_VERSIONS.keys())[0], - list(SUPPORTED_RAY_VERSIONS.values())[0], - list(SUPPORTED_RAY_VERSIONS.keys())[1], - list(SUPPORTED_RAY_VERSIONS.values())[1], + "[Ray on Vertex AI]: The supported Ray versions are %s (%s) and %s (%s)." 
+ % ( + list(SUPPORTED_RAY_VERSIONS.keys())[0], + list(SUPPORTED_RAY_VERSIONS.values())[0], + list(SUPPORTED_RAY_VERSIONS.keys())[1], + list(SUPPORTED_RAY_VERSIONS.values())[1], + ) ) if python_version not in SUPPORTED_PY_VERSION: raise ValueError("[Ray on Vertex AI]: The supported Python version is 3.10.") diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 35a10529b8..c5ee3896d0 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -1489,6 +1489,7 @@ def _prepare_training_task_inputs_and_output_dir( enable_dashboard_access: bool = False, tensorboard: Optional[str] = None, disable_retries: bool = False, + persistent_resource_id: Optional[str] = None, ) -> Tuple[Dict, str]: """Prepares training task inputs and output directory for custom job. @@ -1539,6 +1540,14 @@ def _prepare_training_task_inputs_and_output_dir( Indicates if the job should retry for internal errors after the job starts running. If True, overrides `restart_job_on_worker_restart` to False. + persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. + Returns: Training task inputs and Output directory for custom job. 
""" @@ -1565,6 +1574,8 @@ def _prepare_training_task_inputs_and_output_dir( training_task_inputs["enable_web_access"] = enable_web_access if enable_dashboard_access: training_task_inputs["enable_dashboard_access"] = enable_dashboard_access + if persistent_resource_id: + training_task_inputs["persistent_resource_id"] = persistent_resource_id if timeout or restart_job_on_worker_restart or disable_retries: timeout = f"{timeout}s" if timeout else None @@ -2962,6 +2973,7 @@ def run( sync=True, create_request_timeout: Optional[float] = None, disable_retries: bool = False, + persistent_resource_id: Optional[str] = None, ) -> Optional[models.Model]: """Runs the custom training job. @@ -3249,6 +3261,13 @@ def run( Indicates if the job should retry for internal errors after the job starts running. If True, overrides `restart_job_on_worker_restart` to False. + persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. Returns: model: The trained Vertex AI Model resource or None if training did not @@ -3311,6 +3330,7 @@ def run( sync=sync, create_request_timeout=create_request_timeout, disable_retries=disable_retries, + persistent_resource_id=persistent_resource_id, ) def submit( @@ -3362,6 +3382,7 @@ def submit( sync=True, create_request_timeout: Optional[float] = None, disable_retries: bool = False, + persistent_resource_id: Optional[str] = None, ) -> Optional[models.Model]: """Submits the custom training job without blocking until completion. @@ -3649,6 +3670,13 @@ def submit( Indicates if the job should retry for internal errors after the job starts running. If True, overrides `restart_job_on_worker_restart` to False. 
+ persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. Returns: model: The trained Vertex AI Model resource or None if training did not @@ -3711,6 +3739,7 @@ def submit( create_request_timeout=create_request_timeout, block=False, disable_retries=disable_retries, + persistent_resource_id=persistent_resource_id, ) @base.optional_sync(construct_object_on_arg="managed_model") @@ -3757,6 +3786,7 @@ def _run( create_request_timeout: Optional[float] = None, block: Optional[bool] = True, disable_retries: bool = False, + persistent_resource_id: Optional[str] = None, ) -> Optional[models.Model]: """Packages local script and launches training_job. @@ -3946,6 +3976,13 @@ def _run( Indicates if the job should retry for internal errors after the job starts running. If True, overrides `restart_job_on_worker_restart` to False. + persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. 
Returns: model: The trained Vertex AI Model resource or None if training did not @@ -3999,6 +4036,7 @@ def _run( enable_dashboard_access=enable_dashboard_access, tensorboard=tensorboard, disable_retries=disable_retries, + persistent_resource_id=persistent_resource_id, ) model = self._run_job( @@ -4321,6 +4359,7 @@ def run( sync=True, create_request_timeout: Optional[float] = None, disable_retries: bool = False, + persistent_resource_id: Optional[str] = None, ) -> Optional[models.Model]: """Runs the custom training job. @@ -4601,6 +4640,13 @@ def run( Indicates if the job should retry for internal errors after the job starts running. If True, overrides `restart_job_on_worker_restart` to False. + persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. Returns: model: The trained Vertex AI Model resource or None if training did not @@ -4662,6 +4708,7 @@ def run( sync=sync, create_request_timeout=create_request_timeout, disable_retries=disable_retries, + persistent_resource_id=persistent_resource_id, ) def submit( @@ -4713,6 +4760,7 @@ def submit( sync=True, create_request_timeout: Optional[float] = None, disable_retries: bool = False, + persistent_resource_id: Optional[str] = None, ) -> Optional[models.Model]: """Submits the custom training job without blocking until completion. @@ -4993,6 +5041,13 @@ def submit( Indicates if the job should retry for internal errors after the job starts running. If True, overrides `restart_job_on_worker_restart` to False. + persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. 
If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. Returns: model: The trained Vertex AI Model resource or None if training did not @@ -5054,6 +5109,7 @@ def submit( create_request_timeout=create_request_timeout, block=False, disable_retries=disable_retries, + persistent_resource_id=persistent_resource_id, ) @base.optional_sync(construct_object_on_arg="managed_model") @@ -5099,6 +5155,7 @@ def _run( create_request_timeout: Optional[float] = None, block: Optional[bool] = True, disable_retries: bool = False, + persistent_resource_id: Optional[str] = None, ) -> Optional[models.Model]: """Packages local script and launches training_job. Args: @@ -5284,6 +5341,13 @@ def _run( Indicates if the job should retry for internal errors after the job starts running. If True, overrides `restart_job_on_worker_restart` to False. + persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. Returns: model: The trained Vertex AI Model resource or None if training did not @@ -5331,6 +5395,7 @@ def _run( enable_dashboard_access=enable_dashboard_access, tensorboard=tensorboard, disable_retries=disable_retries, + persistent_resource_id=persistent_resource_id, ) model = self._run_job( @@ -7249,6 +7314,7 @@ def run( sync=True, create_request_timeout: Optional[float] = None, disable_retries: bool = False, + persistent_resource_id: Optional[str] = None, ) -> Optional[models.Model]: """Runs the custom training job. 
@@ -7530,6 +7596,13 @@ def run( Indicates if the job should retry for internal errors after the job starts running. If True, overrides `restart_job_on_worker_restart` to False. + persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. Returns: model: The trained Vertex AI Model resource or None if training did not @@ -7586,6 +7659,7 @@ def run( sync=sync, create_request_timeout=create_request_timeout, disable_retries=disable_retries, + persistent_resource_id=persistent_resource_id, ) @base.optional_sync(construct_object_on_arg="managed_model") @@ -7630,6 +7704,7 @@ def _run( sync=True, create_request_timeout: Optional[float] = None, disable_retries: bool = False, + persistent_resource_id: Optional[str] = None, ) -> Optional[models.Model]: """Packages local script and launches training_job. @@ -7800,6 +7875,13 @@ def _run( Indicates if the job should retry for internal errors after the job starts running. If True, overrides `restart_job_on_worker_restart` to False. + persistent_resource_id (str): + Optional. The ID of the PersistentResource in the same Project + and Location. If this is specified, the job will be run on + existing machines held by the PersistentResource instead of + on-demand short-live machines. The network, CMEK, and node pool + configs on the job should be consistent with those on the + PersistentResource, otherwise, the job will be rejected. 
Returns: model: The trained Vertex AI Model resource or None if training did not @@ -7847,6 +7929,7 @@ def _run( enable_dashboard_access=enable_dashboard_access, tensorboard=tensorboard, disable_retries=disable_retries, + persistent_resource_id=persistent_resource_id, ) model = self._run_job( diff --git a/google/cloud/aiplatform/utils/__init__.py b/google/cloud/aiplatform/utils/__init__.py index f7c4ee725e..b0e74e1278 100644 --- a/google/cloud/aiplatform/utils/__init__.py +++ b/google/cloud/aiplatform/utils/__init__.py @@ -39,6 +39,8 @@ dataset_service_client_v1beta1, deployment_resource_pool_service_client_v1beta1, endpoint_service_client_v1beta1, + extension_execution_service_client_v1beta1, + extension_registry_service_client_v1beta1, feature_online_store_admin_service_client_v1beta1, feature_online_store_service_client_v1beta1, featurestore_online_serving_service_client_v1beta1, @@ -79,6 +81,7 @@ schedule_service_client_v1, tensorboard_service_client_v1, vizier_service_client_v1, + persistent_resource_service_client_v1, ) from google.cloud.aiplatform.compat.types import ( @@ -554,6 +557,28 @@ class EndpointClientWithOverride(ClientWithOverride): ) +class ExtensionExecutionClientWithOverride(ClientWithOverride): + _is_temporary = True + _default_version = compat.V1BETA1 + _version_map = ( + ( + compat.V1BETA1, + extension_execution_service_client_v1beta1.ExtensionExecutionServiceClient, + ), + ) + + +class ExtensionRegistryClientWithOverride(ClientWithOverride): + _is_temporary = True + _default_version = compat.V1BETA1 + _version_map = ( + ( + compat.V1BETA1, + extension_registry_service_client_v1beta1.ExtensionRegistryServiceClient, + ), + ) + + class IndexClientWithOverride(ClientWithOverride): _is_temporary = True _default_version = compat.DEFAULT_VERSION @@ -739,8 +764,12 @@ class ModelGardenClientWithOverride(ClientWithOverride): class PersistentResourceClientWithOverride(ClientWithOverride): _is_temporary = True - _default_version = compat.V1BETA1 + 
_default_version = compat.DEFAULT_VERSION _version_map = ( + ( + compat.V1, + persistent_resource_service_client_v1.PersistentResourceServiceClient, + ), ( compat.V1BETA1, persistent_resource_service_client_v1beta1.PersistentResourceServiceClient, diff --git a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py index 2618598ae7..167b2fa3c1 100644 --- a/google/cloud/aiplatform/version.py +++ b/google/cloud/aiplatform/version.py @@ -15,4 +15,4 @@ # limitations under the License. 
# -__version__ = "1.46.0" +__version__ = "1.47.0" diff --git a/google/cloud/aiplatform_v1/__init__.py b/google/cloud/aiplatform_v1/__init__.py index cc1c2e5ff0..86f821a12a 100644 --- a/google/cloud/aiplatform_v1/__init__.py +++ b/google/cloud/aiplatform_v1/__init__.py @@ -46,6 +46,8 @@ ) from .services.featurestore_service import FeaturestoreServiceClient from .services.featurestore_service import FeaturestoreServiceAsyncClient +from .services.gen_ai_tuning_service import GenAiTuningServiceClient +from .services.gen_ai_tuning_service import GenAiTuningServiceAsyncClient from .services.index_endpoint_service import IndexEndpointServiceClient from .services.index_endpoint_service import IndexEndpointServiceAsyncClient from .services.index_service import IndexServiceClient @@ -64,6 +66,10 @@ from .services.model_garden_service import ModelGardenServiceAsyncClient from .services.model_service import ModelServiceClient from .services.model_service import ModelServiceAsyncClient +from .services.notebook_service import NotebookServiceClient +from .services.notebook_service import NotebookServiceAsyncClient +from .services.persistent_resource_service import PersistentResourceServiceClient +from .services.persistent_resource_service import PersistentResourceServiceAsyncClient from .services.pipeline_service import PipelineServiceClient from .services.pipeline_service import PipelineServiceAsyncClient from .services.prediction_service import PredictionServiceClient @@ -314,6 +320,11 @@ from .types.featurestore_service import UpdateFeatureRequest from .types.featurestore_service import UpdateFeaturestoreOperationMetadata from .types.featurestore_service import UpdateFeaturestoreRequest +from .types.genai_tuning_service import CancelTuningJobRequest +from .types.genai_tuning_service import CreateTuningJobRequest +from .types.genai_tuning_service import GetTuningJobRequest +from .types.genai_tuning_service import ListTuningJobsRequest +from .types.genai_tuning_service import 
ListTuningJobsResponse from .types.hyperparameter_tuning_job import HyperparameterTuningJob from .types.index import Index from .types.index import IndexDatapoint @@ -418,6 +429,7 @@ from .types.machine_resources import NfsMount from .types.machine_resources import PersistentDiskSpec from .types.machine_resources import ResourcesConsumed +from .types.machine_resources import ShieldedVmConfig from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters from .types.match_service import FindNeighborsRequest from .types.match_service import FindNeighborsResponse @@ -550,10 +562,51 @@ from .types.nas_job import NasJobSpec from .types.nas_job import NasTrial from .types.nas_job import NasTrialDetail +from .types.network_spec import NetworkSpec +from .types.notebook_euc_config import NotebookEucConfig +from .types.notebook_idle_shutdown_config import NotebookIdleShutdownConfig +from .types.notebook_runtime import NotebookRuntime +from .types.notebook_runtime import NotebookRuntimeTemplate +from .types.notebook_runtime import NotebookRuntimeType +from .types.notebook_runtime_template_ref import NotebookRuntimeTemplateRef +from .types.notebook_service import AssignNotebookRuntimeOperationMetadata +from .types.notebook_service import AssignNotebookRuntimeRequest +from .types.notebook_service import CreateNotebookRuntimeTemplateOperationMetadata +from .types.notebook_service import CreateNotebookRuntimeTemplateRequest +from .types.notebook_service import DeleteNotebookRuntimeRequest +from .types.notebook_service import DeleteNotebookRuntimeTemplateRequest +from .types.notebook_service import GetNotebookRuntimeRequest +from .types.notebook_service import GetNotebookRuntimeTemplateRequest +from .types.notebook_service import ListNotebookRuntimesRequest +from .types.notebook_service import ListNotebookRuntimesResponse +from .types.notebook_service import ListNotebookRuntimeTemplatesRequest +from .types.notebook_service import ListNotebookRuntimeTemplatesResponse 
+from .types.notebook_service import StartNotebookRuntimeOperationMetadata +from .types.notebook_service import StartNotebookRuntimeRequest +from .types.notebook_service import StartNotebookRuntimeResponse +from .types.notebook_service import UpgradeNotebookRuntimeOperationMetadata +from .types.notebook_service import UpgradeNotebookRuntimeRequest +from .types.notebook_service import UpgradeNotebookRuntimeResponse from .types.openapi import Schema from .types.openapi import Type from .types.operation import DeleteOperationMetadata from .types.operation import GenericOperationMetadata +from .types.persistent_resource import PersistentResource +from .types.persistent_resource import RaySpec +from .types.persistent_resource import ResourcePool +from .types.persistent_resource import ResourceRuntime +from .types.persistent_resource import ResourceRuntimeSpec +from .types.persistent_resource import ServiceAccountSpec +from .types.persistent_resource_service import CreatePersistentResourceOperationMetadata +from .types.persistent_resource_service import CreatePersistentResourceRequest +from .types.persistent_resource_service import DeletePersistentResourceRequest +from .types.persistent_resource_service import GetPersistentResourceRequest +from .types.persistent_resource_service import ListPersistentResourcesRequest +from .types.persistent_resource_service import ListPersistentResourcesResponse +from .types.persistent_resource_service import RebootPersistentResourceOperationMetadata +from .types.persistent_resource_service import RebootPersistentResourceRequest +from .types.persistent_resource_service import UpdatePersistentResourceOperationMetadata +from .types.persistent_resource_service import UpdatePersistentResourceRequest from .types.pipeline_failure_policy import PipelineFailurePolicy from .types.pipeline_job import PipelineJob from .types.pipeline_job import PipelineJobDetail @@ -698,6 +751,13 @@ from .types.training_pipeline import StratifiedSplit from 
.types.training_pipeline import TimestampSplit from .types.training_pipeline import TrainingPipeline +from .types.tuning_job import SupervisedHyperParameters +from .types.tuning_job import SupervisedTuningDatasetDistribution +from .types.tuning_job import SupervisedTuningDataStats +from .types.tuning_job import SupervisedTuningSpec +from .types.tuning_job import TunedModel +from .types.tuning_job import TuningDataStats +from .types.tuning_job import TuningJob from .types.types import BoolArray from .types.types import DoubleArray from .types.types import Int64Array @@ -738,6 +798,7 @@ "FeatureRegistryServiceAsyncClient", "FeaturestoreOnlineServingServiceAsyncClient", "FeaturestoreServiceAsyncClient", + "GenAiTuningServiceAsyncClient", "IndexEndpointServiceAsyncClient", "IndexServiceAsyncClient", "JobServiceAsyncClient", @@ -747,6 +808,8 @@ "MigrationServiceAsyncClient", "ModelGardenServiceAsyncClient", "ModelServiceAsyncClient", + "NotebookServiceAsyncClient", + "PersistentResourceServiceAsyncClient", "PipelineServiceAsyncClient", "PredictionServiceAsyncClient", "ScheduleServiceAsyncClient", @@ -765,6 +828,8 @@ "Annotation", "AnnotationSpec", "Artifact", + "AssignNotebookRuntimeOperationMetadata", + "AssignNotebookRuntimeRequest", "Attribution", "AutomaticResources", "AutoscalingMetricSpec", @@ -807,6 +872,7 @@ "CancelNasJobRequest", "CancelPipelineJobRequest", "CancelTrainingPipelineRequest", + "CancelTuningJobRequest", "Candidate", "CheckTrialEarlyStoppingStateMetatdata", "CheckTrialEarlyStoppingStateRequest", @@ -862,6 +928,10 @@ "CreateMetadataStoreRequest", "CreateModelDeploymentMonitoringJobRequest", "CreateNasJobRequest", + "CreateNotebookRuntimeTemplateOperationMetadata", + "CreateNotebookRuntimeTemplateRequest", + "CreatePersistentResourceOperationMetadata", + "CreatePersistentResourceRequest", "CreatePipelineJobRequest", "CreateRegistryFeatureOperationMetadata", "CreateScheduleRequest", @@ -875,6 +945,7 @@ "CreateTensorboardTimeSeriesRequest", 
"CreateTrainingPipelineRequest", "CreateTrialRequest", + "CreateTuningJobRequest", "CsvDestination", "CsvSource", "CustomJob", @@ -914,7 +985,10 @@ "DeleteModelRequest", "DeleteModelVersionRequest", "DeleteNasJobRequest", + "DeleteNotebookRuntimeRequest", + "DeleteNotebookRuntimeTemplateRequest", "DeleteOperationMetadata", + "DeletePersistentResourceRequest", "DeletePipelineJobRequest", "DeleteSavedQueryRequest", "DeleteScheduleRequest", @@ -1014,6 +1088,7 @@ "FunctionResponse", "GcsDestination", "GcsSource", + "GenAiTuningServiceClient", "GenerateContentRequest", "GenerateContentResponse", "GenerationConfig", @@ -1048,6 +1123,9 @@ "GetModelRequest", "GetNasJobRequest", "GetNasTrialDetailRequest", + "GetNotebookRuntimeRequest", + "GetNotebookRuntimeTemplateRequest", + "GetPersistentResourceRequest", "GetPipelineJobRequest", "GetPublisherModelRequest", "GetScheduleRequest", @@ -1059,6 +1137,7 @@ "GetTensorboardTimeSeriesRequest", "GetTrainingPipelineRequest", "GetTrialRequest", + "GetTuningJobRequest", "GoogleSearchRetrieval", "GroundingAttribution", "GroundingMetadata", @@ -1149,8 +1228,14 @@ "ListNasJobsResponse", "ListNasTrialDetailsRequest", "ListNasTrialDetailsResponse", + "ListNotebookRuntimeTemplatesRequest", + "ListNotebookRuntimeTemplatesResponse", + "ListNotebookRuntimesRequest", + "ListNotebookRuntimesResponse", "ListOptimalTrialsRequest", "ListOptimalTrialsResponse", + "ListPersistentResourcesRequest", + "ListPersistentResourcesResponse", "ListPipelineJobsRequest", "ListPipelineJobsResponse", "ListSavedQueriesRequest", @@ -1173,6 +1258,8 @@ "ListTrainingPipelinesResponse", "ListTrialsRequest", "ListTrialsResponse", + "ListTuningJobsRequest", + "ListTuningJobsResponse", "LlmUtilityServiceClient", "LookupStudyRequest", "MachineSpec", @@ -1219,11 +1306,21 @@ "NearestNeighborSearchOperationMetadata", "NearestNeighbors", "Neighbor", + "NetworkSpec", "NfsMount", + "NotebookEucConfig", + "NotebookIdleShutdownConfig", + "NotebookRuntime", + 
"NotebookRuntimeTemplate", + "NotebookRuntimeTemplateRef", + "NotebookRuntimeType", + "NotebookServiceClient", "Part", "PauseModelDeploymentMonitoringJobRequest", "PauseScheduleRequest", "PersistentDiskSpec", + "PersistentResource", + "PersistentResourceServiceClient", "PipelineFailurePolicy", "PipelineJob", "PipelineJobDetail", @@ -1262,6 +1359,7 @@ "QueryDeployedModelsResponse", "QueryExecutionInputsAndOutputsRequest", "RawPredictRequest", + "RaySpec", "ReadFeatureValuesRequest", "ReadFeatureValuesResponse", "ReadIndexDatapointsRequest", @@ -1274,10 +1372,15 @@ "ReadTensorboardTimeSeriesDataResponse", "ReadTensorboardUsageRequest", "ReadTensorboardUsageResponse", + "RebootPersistentResourceOperationMetadata", + "RebootPersistentResourceRequest", "RemoveContextChildrenRequest", "RemoveContextChildrenResponse", "RemoveDatapointsRequest", "RemoveDatapointsResponse", + "ResourcePool", + "ResourceRuntime", + "ResourceRuntimeSpec", "ResourcesConsumed", "RestoreDatasetVersionOperationMetadata", "RestoreDatasetVersionRequest", @@ -1306,9 +1409,14 @@ "SearchNearestEntitiesRequest", "SearchNearestEntitiesResponse", "Segment", + "ServiceAccountSpec", + "ShieldedVmConfig", "SmoothGradConfig", "SpecialistPool", "SpecialistPoolServiceClient", + "StartNotebookRuntimeOperationMetadata", + "StartNotebookRuntimeRequest", + "StartNotebookRuntimeResponse", "StopTrialRequest", "StratifiedSplit", "StreamDirectPredictRequest", @@ -1328,6 +1436,10 @@ "SuggestTrialsMetadata", "SuggestTrialsRequest", "SuggestTrialsResponse", + "SupervisedHyperParameters", + "SupervisedTuningDataStats", + "SupervisedTuningDatasetDistribution", + "SupervisedTuningSpec", "SyncFeatureViewRequest", "SyncFeatureViewResponse", "TFRecordDestination", @@ -1350,6 +1462,9 @@ "TrainingPipeline", "Trial", "TrialContext", + "TunedModel", + "TuningDataStats", + "TuningJob", "Type", "UndeployIndexOperationMetadata", "UndeployIndexRequest", @@ -1384,6 +1499,8 @@ "UpdateModelDeploymentMonitoringJobOperationMetadata", 
"UpdateModelDeploymentMonitoringJobRequest", "UpdateModelRequest", + "UpdatePersistentResourceOperationMetadata", + "UpdatePersistentResourceRequest", "UpdateScheduleRequest", "UpdateSpecialistPoolOperationMetadata", "UpdateSpecialistPoolRequest", @@ -1392,6 +1509,9 @@ "UpdateTensorboardRequest", "UpdateTensorboardRunRequest", "UpdateTensorboardTimeSeriesRequest", + "UpgradeNotebookRuntimeOperationMetadata", + "UpgradeNotebookRuntimeRequest", + "UpgradeNotebookRuntimeResponse", "UploadModelOperationMetadata", "UploadModelRequest", "UploadModelResponse", diff --git a/google/cloud/aiplatform_v1/gapic_metadata.json b/google/cloud/aiplatform_v1/gapic_metadata.json index 8f4776fb91..bd99ce18bc 100644 --- a/google/cloud/aiplatform_v1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1/gapic_metadata.json @@ -1357,6 +1357,85 @@ } } }, + "GenAiTuningService": { + "clients": { + "grpc": { + "libraryClient": "GenAiTuningServiceClient", + "rpcs": { + "CancelTuningJob": { + "methods": [ + "cancel_tuning_job" + ] + }, + "CreateTuningJob": { + "methods": [ + "create_tuning_job" + ] + }, + "GetTuningJob": { + "methods": [ + "get_tuning_job" + ] + }, + "ListTuningJobs": { + "methods": [ + "list_tuning_jobs" + ] + } + } + }, + "grpc-async": { + "libraryClient": "GenAiTuningServiceAsyncClient", + "rpcs": { + "CancelTuningJob": { + "methods": [ + "cancel_tuning_job" + ] + }, + "CreateTuningJob": { + "methods": [ + "create_tuning_job" + ] + }, + "GetTuningJob": { + "methods": [ + "get_tuning_job" + ] + }, + "ListTuningJobs": { + "methods": [ + "list_tuning_jobs" + ] + } + } + }, + "rest": { + "libraryClient": "GenAiTuningServiceClient", + "rpcs": { + "CancelTuningJob": { + "methods": [ + "cancel_tuning_job" + ] + }, + "CreateTuningJob": { + "methods": [ + "create_tuning_job" + ] + }, + "GetTuningJob": { + "methods": [ + "get_tuning_job" + ] + }, + "ListTuningJobs": { + "methods": [ + "list_tuning_jobs" + ] + } + } + } + } + }, "IndexEndpointService": { "clients": { "grpc": { @@ 
-3133,6 +3212,284 @@ } } }, + "NotebookService": { + "clients": { + "grpc": { + "libraryClient": "NotebookServiceClient", + "rpcs": { + "AssignNotebookRuntime": { + "methods": [ + "assign_notebook_runtime" + ] + }, + "CreateNotebookRuntimeTemplate": { + "methods": [ + "create_notebook_runtime_template" + ] + }, + "DeleteNotebookRuntime": { + "methods": [ + "delete_notebook_runtime" + ] + }, + "DeleteNotebookRuntimeTemplate": { + "methods": [ + "delete_notebook_runtime_template" + ] + }, + "GetNotebookRuntime": { + "methods": [ + "get_notebook_runtime" + ] + }, + "GetNotebookRuntimeTemplate": { + "methods": [ + "get_notebook_runtime_template" + ] + }, + "ListNotebookRuntimeTemplates": { + "methods": [ + "list_notebook_runtime_templates" + ] + }, + "ListNotebookRuntimes": { + "methods": [ + "list_notebook_runtimes" + ] + }, + "StartNotebookRuntime": { + "methods": [ + "start_notebook_runtime" + ] + }, + "UpgradeNotebookRuntime": { + "methods": [ + "upgrade_notebook_runtime" + ] + } + } + }, + "grpc-async": { + "libraryClient": "NotebookServiceAsyncClient", + "rpcs": { + "AssignNotebookRuntime": { + "methods": [ + "assign_notebook_runtime" + ] + }, + "CreateNotebookRuntimeTemplate": { + "methods": [ + "create_notebook_runtime_template" + ] + }, + "DeleteNotebookRuntime": { + "methods": [ + "delete_notebook_runtime" + ] + }, + "DeleteNotebookRuntimeTemplate": { + "methods": [ + "delete_notebook_runtime_template" + ] + }, + "GetNotebookRuntime": { + "methods": [ + "get_notebook_runtime" + ] + }, + "GetNotebookRuntimeTemplate": { + "methods": [ + "get_notebook_runtime_template" + ] + }, + "ListNotebookRuntimeTemplates": { + "methods": [ + "list_notebook_runtime_templates" + ] + }, + "ListNotebookRuntimes": { + "methods": [ + "list_notebook_runtimes" + ] + }, + "StartNotebookRuntime": { + "methods": [ + "start_notebook_runtime" + ] + }, + "UpgradeNotebookRuntime": { + "methods": [ + "upgrade_notebook_runtime" + ] + } + } + }, + "rest": { + "libraryClient": 
"NotebookServiceClient", + "rpcs": { + "AssignNotebookRuntime": { + "methods": [ + "assign_notebook_runtime" + ] + }, + "CreateNotebookRuntimeTemplate": { + "methods": [ + "create_notebook_runtime_template" + ] + }, + "DeleteNotebookRuntime": { + "methods": [ + "delete_notebook_runtime" + ] + }, + "DeleteNotebookRuntimeTemplate": { + "methods": [ + "delete_notebook_runtime_template" + ] + }, + "GetNotebookRuntime": { + "methods": [ + "get_notebook_runtime" + ] + }, + "GetNotebookRuntimeTemplate": { + "methods": [ + "get_notebook_runtime_template" + ] + }, + "ListNotebookRuntimeTemplates": { + "methods": [ + "list_notebook_runtime_templates" + ] + }, + "ListNotebookRuntimes": { + "methods": [ + "list_notebook_runtimes" + ] + }, + "StartNotebookRuntime": { + "methods": [ + "start_notebook_runtime" + ] + }, + "UpgradeNotebookRuntime": { + "methods": [ + "upgrade_notebook_runtime" + ] + } + } + } + } + }, + "PersistentResourceService": { + "clients": { + "grpc": { + "libraryClient": "PersistentResourceServiceClient", + "rpcs": { + "CreatePersistentResource": { + "methods": [ + "create_persistent_resource" + ] + }, + "DeletePersistentResource": { + "methods": [ + "delete_persistent_resource" + ] + }, + "GetPersistentResource": { + "methods": [ + "get_persistent_resource" + ] + }, + "ListPersistentResources": { + "methods": [ + "list_persistent_resources" + ] + }, + "RebootPersistentResource": { + "methods": [ + "reboot_persistent_resource" + ] + }, + "UpdatePersistentResource": { + "methods": [ + "update_persistent_resource" + ] + } + } + }, + "grpc-async": { + "libraryClient": "PersistentResourceServiceAsyncClient", + "rpcs": { + "CreatePersistentResource": { + "methods": [ + "create_persistent_resource" + ] + }, + "DeletePersistentResource": { + "methods": [ + "delete_persistent_resource" + ] + }, + "GetPersistentResource": { + "methods": [ + "get_persistent_resource" + ] + }, + "ListPersistentResources": { + "methods": [ + "list_persistent_resources" + ] + }, + 
"RebootPersistentResource": { + "methods": [ + "reboot_persistent_resource" + ] + }, + "UpdatePersistentResource": { + "methods": [ + "update_persistent_resource" + ] + } + } + }, + "rest": { + "libraryClient": "PersistentResourceServiceClient", + "rpcs": { + "CreatePersistentResource": { + "methods": [ + "create_persistent_resource" + ] + }, + "DeletePersistentResource": { + "methods": [ + "delete_persistent_resource" + ] + }, + "GetPersistentResource": { + "methods": [ + "get_persistent_resource" + ] + }, + "ListPersistentResources": { + "methods": [ + "list_persistent_resources" + ] + }, + "RebootPersistentResource": { + "methods": [ + "reboot_persistent_resource" + ] + }, + "UpdatePersistentResource": { + "methods": [ + "update_persistent_resource" + ] + } + } + } + } + }, "PipelineService": { "clients": { "grpc": { diff --git a/google/cloud/aiplatform_v1/gapic_version.py b/google/cloud/aiplatform_v1/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform_v1/gapic_version.py +++ b/google/cloud/aiplatform_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py index c85a91345a..6f87d5a149 100644 --- a/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/dataset_service/transports/rest.py @@ -1093,6 +1093,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1233,6 +1237,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1387,6 +1395,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1551,6 +1563,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": 
"delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1717,6 +1733,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1885,6 +1905,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2059,6 +2083,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2161,7 +2189,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2215,6 +2243,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2389,6 +2421,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2541,6 +2577,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -4994,6 +5034,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5134,6 +5178,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5345,6 +5393,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5509,6 +5561,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -5735,6 +5791,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5903,6 +5963,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -6138,6 +6202,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -6240,7 +6308,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -6294,6 +6362,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -6529,6 +6601,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -6681,6 +6757,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py index e30eaa04f2..0a16d9bc97 100644 --- a/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/deployment_resource_pool_service/transports/rest.py @@ -710,6 +710,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -850,6 +854,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1004,6 +1012,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1168,6 +1180,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1334,6 +1350,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1502,6 +1522,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1676,6 +1700,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -1778,7 +1806,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -1832,6 +1860,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: 
"method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2006,6 +2038,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2158,6 +2194,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3378,6 +3418,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3518,6 +3562,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3729,6 +3777,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3893,6 +3945,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4119,6 +4175,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4287,6 +4347,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4522,6 +4586,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4624,7 +4692,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -4678,6 +4746,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4913,6 +4985,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5065,6 +5141,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py index daccab8f2c..ee917899ac 100644 --- a/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/endpoint_service/transports/rest.py @@ -782,6 +782,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -922,6 +926,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1076,6 +1084,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1240,6 +1252,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1406,6 +1422,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1574,6 +1594,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1748,6 +1772,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -1850,7 +1878,7 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -1904,6 +1932,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2078,6 +2110,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2230,6 +2266,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3710,6 +3750,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3850,6 +3894,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4061,6 +4109,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4225,6 +4277,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4451,6 +4507,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4619,6 +4679,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4854,6 +4918,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4956,7 +5024,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": 
"/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -5010,6 +5078,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5245,6 +5317,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5397,6 +5473,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py index 42e04838e0..9355094000 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_admin_service/transports/rest.py @@ -985,6 +985,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1125,6 +1129,10 @@ def operations_client(self) 
-> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1279,6 +1287,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1443,6 +1455,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1609,6 +1625,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1777,6 +1797,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1951,6 +1975,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2053,7 +2081,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2107,6 +2135,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2281,6 +2313,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2433,6 +2469,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -4499,6 +4539,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4639,6 +4683,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4850,6 +4898,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5014,6 +5066,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -5240,6 +5296,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5408,6 +5468,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -5643,6 +5707,10 @@ def __call__( "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5745,7 +5813,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -5799,6 +5867,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -6034,6 +6106,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -6186,6 +6262,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest.py b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest.py index cc355b93c1..02826f7715 100644 --- a/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/feature_online_store_service/transports/rest.py @@ -1318,6 +1318,10 @@ def __call__( "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1458,6 +1462,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1669,6 +1677,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1833,6 +1845,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2059,6 +2075,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2227,6 +2247,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", 
@@ -2462,6 +2486,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2564,7 +2592,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2618,6 +2646,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2853,6 +2885,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3005,6 +3041,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py index 855bedfda8..1c4f1cacc7 100644 --- a/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/feature_registry_service/transports/rest.py @@ -856,6 +856,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -996,6 +1000,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1150,6 +1158,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1314,6 +1326,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1480,6 +1496,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1648,6 
+1668,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1822,6 +1846,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -1924,7 +1952,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -1978,6 +2006,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2152,6 +2184,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2304,6 +2340,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3999,6 +4039,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4139,6 +4183,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4350,6 +4398,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4514,6 +4566,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4740,6 +4796,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4908,6 +4968,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -5143,6 +5207,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5245,7 +5313,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -5299,6 +5367,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5534,6 +5606,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5686,6 +5762,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git 
a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py index cf406f7b05..f4e9ba9a0a 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/featurestore_online_serving_service/transports/rest.py @@ -1460,6 +1460,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1600,6 +1604,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1811,6 +1819,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1975,6 +1987,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2201,6 +2217,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2369,6 +2389,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2604,6 +2628,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2706,7 +2734,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2760,6 +2788,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2995,6 +3027,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3147,6 +3183,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", 
"uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py index a9d2ef484c..b34f18d348 100644 --- a/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/featurestore_service/transports/rest.py @@ -1205,6 +1205,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1345,6 +1349,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1499,6 +1507,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1663,6 +1675,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", 
"uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1829,6 +1845,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1997,6 +2017,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2171,6 +2195,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2273,7 +2301,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2327,6 +2355,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2501,6 +2533,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: 
"method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2653,6 +2689,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5479,6 +5519,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5619,6 +5663,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5830,6 +5878,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5994,6 +6046,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -6220,6 +6276,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -6388,6 +6448,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -6623,6 +6687,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -6725,7 +6793,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -6779,6 +6847,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -7014,6 +7086,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -7166,6 +7242,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/__init__.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/__init__.py new file mode 100644 index 0000000000..5d13bd02e9 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import GenAiTuningServiceClient +from .async_client import GenAiTuningServiceAsyncClient + +__all__ = ( + "GenAiTuningServiceClient", + "GenAiTuningServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py new file mode 100644 index 0000000000..22873bd4c3 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/async_client.py @@ -0,0 +1,1455 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.cloud.aiplatform_v1.services.gen_ai_tuning_service import pagers +from google.cloud.aiplatform_v1.types import genai_tuning_service +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import tuning_job +from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import GenAiTuningServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import GenAiTuningServiceGrpcAsyncIOTransport +from .client import GenAiTuningServiceClient + + +class GenAiTuningServiceAsyncClient: + """A service for creating and managing GenAI Tuning Jobs.""" + + _client: GenAiTuningServiceClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
+ DEFAULT_ENDPOINT = GenAiTuningServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = GenAiTuningServiceClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = GenAiTuningServiceClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = GenAiTuningServiceClient._DEFAULT_UNIVERSE + + context_path = staticmethod(GenAiTuningServiceClient.context_path) + parse_context_path = staticmethod(GenAiTuningServiceClient.parse_context_path) + endpoint_path = staticmethod(GenAiTuningServiceClient.endpoint_path) + parse_endpoint_path = staticmethod(GenAiTuningServiceClient.parse_endpoint_path) + model_path = staticmethod(GenAiTuningServiceClient.model_path) + parse_model_path = staticmethod(GenAiTuningServiceClient.parse_model_path) + tuning_job_path = staticmethod(GenAiTuningServiceClient.tuning_job_path) + parse_tuning_job_path = staticmethod(GenAiTuningServiceClient.parse_tuning_job_path) + common_billing_account_path = staticmethod( + GenAiTuningServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + GenAiTuningServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(GenAiTuningServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + GenAiTuningServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + GenAiTuningServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + GenAiTuningServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(GenAiTuningServiceClient.common_project_path) + parse_common_project_path = staticmethod( + GenAiTuningServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(GenAiTuningServiceClient.common_location_path) + parse_common_location_path = staticmethod( + GenAiTuningServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this 
client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenAiTuningServiceAsyncClient: The constructed client. + """ + return GenAiTuningServiceClient.from_service_account_info.__func__(GenAiTuningServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenAiTuningServiceAsyncClient: The constructed client. + """ + return GenAiTuningServiceClient.from_service_account_file.__func__(GenAiTuningServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return GenAiTuningServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> GenAiTuningServiceTransport: + """Returns the transport used by the client instance. + + Returns: + GenAiTuningServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. 
+ """ + return self._client._universe_domain + + get_transport_class = functools.partial( + type(GenAiTuningServiceClient).get_transport_class, + type(GenAiTuningServiceClient), + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, GenAiTuningServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the gen ai tuning service async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.GenAiTuningServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. 
If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = GenAiTuningServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_tuning_job( + self, + request: Optional[ + Union[genai_tuning_service.CreateTuningJobRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + tuning_job: Optional[gca_tuning_job.TuningJob] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tuning_job.TuningJob: + r"""Creates a TuningJob. A created TuningJob right away + will be attempted to be run. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_create_tuning_job(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceAsyncClient() + + # Initialize request argument(s) + tuning_job = aiplatform_v1.TuningJob() + tuning_job.base_model = "base_model_value" + tuning_job.supervised_tuning_spec.training_dataset_uri = "training_dataset_uri_value" + + request = aiplatform_v1.CreateTuningJobRequest( + parent="parent_value", + tuning_job=tuning_job, + ) + + # Make the request + response = await client.create_tuning_job(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.CreateTuningJobRequest, dict]]): + The request object. Request message for + [GenAiTuningService.CreateTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.CreateTuningJob]. + parent (:class:`str`): + Required. The resource name of the Location to create + the TuningJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tuning_job (:class:`google.cloud.aiplatform_v1.types.TuningJob`): + Required. The TuningJob to create. + This corresponds to the ``tuning_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TuningJob: + Represents a TuningJob that runs with + Google owned models. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tuning_job]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = genai_tuning_service.CreateTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tuning_job is not None: + request.tuning_job = tuning_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_tuning_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_tuning_job( + self, + request: Optional[Union[genai_tuning_service.GetTuningJobRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tuning_job.TuningJob: + r"""Gets a TuningJob. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_get_tuning_job(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTuningJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tuning_job(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.GetTuningJobRequest, dict]]): + The request object. Request message for + [GenAiTuningService.GetTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.GetTuningJob]. + name (:class:`str`): + Required. The name of the TuningJob resource. Format: + ``projects/{project}/locations/{location}/tuningJobs/{tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TuningJob: + Represents a TuningJob that runs with + Google owned models. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = genai_tuning_service.GetTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_tuning_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_tuning_jobs( + self, + request: Optional[ + Union[genai_tuning_service.ListTuningJobsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTuningJobsAsyncPager: + r"""Lists TuningJobs in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_list_tuning_jobs(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTuningJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tuning_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.ListTuningJobsRequest, dict]]): + The request object. Request message for + [GenAiTuningService.ListTuningJobs][google.cloud.aiplatform.v1.GenAiTuningService.ListTuningJobs]. + parent (:class:`str`): + Required. The resource name of the Location to list the + TuningJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.gen_ai_tuning_service.pagers.ListTuningJobsAsyncPager: + Response message for + [GenAiTuningService.ListTuningJobs][google.cloud.aiplatform.v1.GenAiTuningService.ListTuningJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = genai_tuning_service.ListTuningJobsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tuning_jobs, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTuningJobsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def cancel_tuning_job( + self, + request: Optional[ + Union[genai_tuning_service.CancelTuningJobRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a TuningJob. Starts asynchronous cancellation on the + TuningJob. The server makes a best effort to cancel the job, but + success is not guaranteed. 
Clients can use + [GenAiTuningService.GetTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.GetTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the TuningJob is not deleted; instead it becomes a + job with a + [TuningJob.error][google.cloud.aiplatform.v1.TuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TuningJob.state][google.cloud.aiplatform.v1.TuningJob.state] is + set to ``CANCELLED``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_cancel_tuning_job(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelTuningJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_tuning_job(request=request) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.CancelTuningJobRequest, dict]]): + The request object. Request message for + [GenAiTuningService.CancelTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.CancelTuningJob]. + name (:class:`str`): + Required. The name of the TuningJob to cancel. Format: + ``projects/{project}/locations/{location}/tuningJobs/{tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = genai_tuning_service.CancelTuningJobRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_tuning_job, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. 
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. 
+ It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "GenAiTuningServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("GenAiTuningServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py new file mode 100644 index 0000000000..38dcca44fc --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/client.py @@ -0,0 +1,1936 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +from google.cloud.aiplatform_v1.services.gen_ai_tuning_service import pagers +from google.cloud.aiplatform_v1.types import genai_tuning_service +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import tuning_job +from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import GenAiTuningServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import GenAiTuningServiceGrpcTransport +from .transports.grpc_asyncio import GenAiTuningServiceGrpcAsyncIOTransport +from .transports.rest import GenAiTuningServiceRestTransport + + +class 
GenAiTuningServiceClientMeta(type):
+    """Metaclass for the GenAiTuningService client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = (
+        OrderedDict()
+    )  # type: Dict[str, Type[GenAiTuningServiceTransport]]
+    _transport_registry["grpc"] = GenAiTuningServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = GenAiTuningServiceGrpcAsyncIOTransport
+    _transport_registry["rest"] = GenAiTuningServiceRestTransport
+
+    def get_transport_class(
+        cls,
+        label: Optional[str] = None,
+    ) -> Type[GenAiTuningServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class GenAiTuningServiceClient(metaclass=GenAiTuningServiceClientMeta):
+    """A service for creating and managing GenAI Tuning Jobs."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenAiTuningServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GenAiTuningServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> GenAiTuningServiceTransport: + """Returns the transport used by the client instance. 
+
+        Returns:
+            GenAiTuningServiceTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def context_path(
+        project: str,
+        location: str,
+        metadata_store: str,
+        context: str,
+    ) -> str:
+        """Returns a fully-qualified context string."""
+        return "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format(
+            project=project,
+            location=location,
+            metadata_store=metadata_store,
+            context=context,
+        )
+
+    @staticmethod
+    def parse_context_path(path: str) -> Dict[str, str]:
+        """Parses a context path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/metadataStores/(?P<metadata_store>.+?)/contexts/(?P<context>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def endpoint_path(
+        project: str,
+        location: str,
+        endpoint: str,
+    ) -> str:
+        """Returns a fully-qualified endpoint string."""
+        return "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
+            project=project,
+            location=location,
+            endpoint=endpoint,
+        )
+
+    @staticmethod
+    def parse_endpoint_path(path: str) -> Dict[str, str]:
+        """Parses a endpoint path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/endpoints/(?P<endpoint>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def model_path(
+        project: str,
+        location: str,
+        model: str,
+    ) -> str:
+        """Returns a fully-qualified model string."""
+        return "projects/{project}/locations/{location}/models/{model}".format(
+            project=project,
+            location=location,
+            model=model,
+        )
+
+    @staticmethod
+    def parse_model_path(path: str) -> Dict[str, str]:
+        """Parses a model path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def tuning_job_path(
+        project: str,
+        location: str,
+        tuning_job: str,
+    ) -> str:
+        """Returns a fully-qualified
tuning_job string."""
+        return "projects/{project}/locations/{location}/tuningJobs/{tuning_job}".format(
+            project=project,
+            location=location,
+            tuning_job=tuning_job,
+        )
+
+    @staticmethod
+    def parse_tuning_job_path(path: str) -> Dict[str, str]:
+        """Parses a tuning_job path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/tuningJobs/(?P<tuning_job>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def
parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[client_options_lib.ClientOptions] = None
+    ):
+        """Deprecated. Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` if provided, use the provided one.
+        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+ + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. 
+ google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. + """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. 
+ use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = GenAiTuningServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = GenAiTuningServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = GenAiTuningServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = GenAiTuningServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. 
+ + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. + + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = GenAiTuningServiceClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or GenAiTuningServiceClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. 
+ """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, GenAiTuningServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the gen ai tuning service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, GenAiTuningServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = GenAiTuningServiceClient._read_environment_variables() + self._client_cert_source = GenAiTuningServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = GenAiTuningServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. 
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, GenAiTuningServiceTransport) + if transport_provided: + # transport is a GenAiTuningServiceTransport instance. + if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = cast(GenAiTuningServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or GenAiTuningServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(cast(str, transport)) + self._transport = Transport( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + def create_tuning_job( + self, + request: Optional[ + Union[genai_tuning_service.CreateTuningJobRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + tuning_job: Optional[gca_tuning_job.TuningJob] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + 
metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tuning_job.TuningJob: + r"""Creates a TuningJob. A created TuningJob right away + will be attempted to be run. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_create_tuning_job(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceClient() + + # Initialize request argument(s) + tuning_job = aiplatform_v1.TuningJob() + tuning_job.base_model = "base_model_value" + tuning_job.supervised_tuning_spec.training_dataset_uri = "training_dataset_uri_value" + + request = aiplatform_v1.CreateTuningJobRequest( + parent="parent_value", + tuning_job=tuning_job, + ) + + # Make the request + response = client.create_tuning_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateTuningJobRequest, dict]): + The request object. Request message for + [GenAiTuningService.CreateTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.CreateTuningJob]. + parent (str): + Required. The resource name of the Location to create + the TuningJob in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + tuning_job (google.cloud.aiplatform_v1.types.TuningJob): + Required. The TuningJob to create. + This corresponds to the ``tuning_job`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.TuningJob: + Represents a TuningJob that runs with + Google owned models. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, tuning_job]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a genai_tuning_service.CreateTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, genai_tuning_service.CreateTuningJobRequest): + request = genai_tuning_service.CreateTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if tuning_job is not None: + request.tuning_job = tuning_job + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_tuning_job( + self, + request: Optional[Union[genai_tuning_service.GetTuningJobRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tuning_job.TuningJob: + r"""Gets a TuningJob. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_get_tuning_job(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTuningJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_tuning_job(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetTuningJobRequest, dict]): + The request object. Request message for + [GenAiTuningService.GetTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.GetTuningJob]. + name (str): + Required. The name of the TuningJob resource. Format: + ``projects/{project}/locations/{location}/tuningJobs/{tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1.types.TuningJob: + Represents a TuningJob that runs with + Google owned models. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a genai_tuning_service.GetTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, genai_tuning_service.GetTuningJobRequest): + request = genai_tuning_service.GetTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_tuning_jobs( + self, + request: Optional[ + Union[genai_tuning_service.ListTuningJobsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTuningJobsPager: + r"""Lists TuningJobs in a Location. 
+ + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_list_tuning_jobs(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTuningJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tuning_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListTuningJobsRequest, dict]): + The request object. Request message for + [GenAiTuningService.ListTuningJobs][google.cloud.aiplatform.v1.GenAiTuningService.ListTuningJobs]. + parent (str): + Required. The resource name of the Location to list the + TuningJobs from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.gen_ai_tuning_service.pagers.ListTuningJobsPager: + Response message for + [GenAiTuningService.ListTuningJobs][google.cloud.aiplatform.v1.GenAiTuningService.ListTuningJobs] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a genai_tuning_service.ListTuningJobsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, genai_tuning_service.ListTuningJobsRequest): + request = genai_tuning_service.ListTuningJobsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tuning_jobs] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTuningJobsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def cancel_tuning_job( + self, + request: Optional[ + Union[genai_tuning_service.CancelTuningJobRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels a TuningJob. Starts asynchronous cancellation on the + TuningJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [GenAiTuningService.GetTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.GetTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the TuningJob is not deleted; instead it becomes a + job with a + [TuningJob.error][google.cloud.aiplatform.v1.TuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TuningJob.state][google.cloud.aiplatform.v1.TuningJob.state] is + set to ``CANCELLED``. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_cancel_tuning_job(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelTuningJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_tuning_job(request=request) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CancelTuningJobRequest, dict]): + The request object. 
Request message for + [GenAiTuningService.CancelTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.CancelTuningJob]. + name (str): + Required. The name of the TuningJob to cancel. Format: + ``projects/{project}/locations/{location}/tuningJobs/{tuning_job}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a genai_tuning_service.CancelTuningJobRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, genai_tuning_service.CancelTuningJobRequest): + request = genai_tuning_service.CancelTuningJobRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_tuning_job] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def __enter__(self) -> "GenAiTuningServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("GenAiTuningServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/pagers.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/pagers.py new file mode 100644 index 0000000000..7f621423f6 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/pagers.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, +) + +from google.cloud.aiplatform_v1.types import genai_tuning_service +from google.cloud.aiplatform_v1.types import tuning_job + + +class ListTuningJobsPager: + """A pager for iterating through ``list_tuning_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTuningJobsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tuning_jobs`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTuningJobs`` requests and continue to iterate + through the ``tuning_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTuningJobsResponse` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., genai_tuning_service.ListTuningJobsResponse], + request: genai_tuning_service.ListTuningJobsRequest, + response: genai_tuning_service.ListTuningJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListTuningJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTuningJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = genai_tuning_service.ListTuningJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[genai_tuning_service.ListTuningJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[tuning_job.TuningJob]: + for page in self.pages: + yield from page.tuning_jobs + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListTuningJobsAsyncPager: + """A pager for iterating through ``list_tuning_jobs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListTuningJobsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tuning_jobs`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListTuningJobs`` requests and continue to iterate + through the ``tuning_jobs`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListTuningJobsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[genai_tuning_service.ListTuningJobsResponse]], + request: genai_tuning_service.ListTuningJobsRequest, + response: genai_tuning_service.ListTuningJobsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListTuningJobsRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListTuningJobsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = genai_tuning_service.ListTuningJobsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[genai_tuning_service.ListTuningJobsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[tuning_job.TuningJob]: + async def async_generator(): + async for page in self.pages: + for response in page.tuning_jobs: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/__init__.py new file mode 100644 index 0000000000..34cd0ffb89 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/__init__.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import GenAiTuningServiceTransport +from .grpc import GenAiTuningServiceGrpcTransport +from .grpc_asyncio import GenAiTuningServiceGrpcAsyncIOTransport +from .rest import GenAiTuningServiceRestTransport +from .rest import GenAiTuningServiceRestInterceptor + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[GenAiTuningServiceTransport]] +_transport_registry["grpc"] = GenAiTuningServiceGrpcTransport +_transport_registry["grpc_asyncio"] = GenAiTuningServiceGrpcAsyncIOTransport +_transport_registry["rest"] = GenAiTuningServiceRestTransport + +__all__ = ( + "GenAiTuningServiceTransport", + "GenAiTuningServiceGrpcTransport", + "GenAiTuningServiceGrpcAsyncIOTransport", + "GenAiTuningServiceRestTransport", + "GenAiTuningServiceRestInterceptor", +) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py new file mode 100644 index 0000000000..9aa1203f39 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/base.py @@ -0,0 +1,305 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import genai_tuning_service +from google.cloud.aiplatform_v1.types import tuning_job +from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class GenAiTuningServiceTransport(abc.ABC): + """Abstract transport class for GenAiTuningService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_tuning_job: gapic_v1.method.wrap_method( + self.create_tuning_job, + default_timeout=None, + client_info=client_info, + ), + self.get_tuning_job: gapic_v1.method.wrap_method( + self.get_tuning_job, + default_timeout=None, + client_info=client_info, + ), + self.list_tuning_jobs: gapic_v1.method.wrap_method( + self.list_tuning_jobs, + default_timeout=None, + client_info=client_info, + ), + self.cancel_tuning_job: gapic_v1.method.wrap_method( + self.cancel_tuning_job, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def create_tuning_job( + self, + ) -> Callable[ + [genai_tuning_service.CreateTuningJobRequest], + Union[gca_tuning_job.TuningJob, Awaitable[gca_tuning_job.TuningJob]], + ]: + raise NotImplementedError() + + @property + def get_tuning_job( + self, + ) -> Callable[ + [genai_tuning_service.GetTuningJobRequest], + Union[tuning_job.TuningJob, Awaitable[tuning_job.TuningJob]], + ]: + raise NotImplementedError() + + @property + def list_tuning_jobs( + self, + ) -> Callable[ + [genai_tuning_service.ListTuningJobsRequest], + Union[ + genai_tuning_service.ListTuningJobsResponse, + Awaitable[genai_tuning_service.ListTuningJobsResponse], + ], + ]: + raise NotImplementedError() + + @property + def cancel_tuning_job( + self, + ) -> Callable[ + [genai_tuning_service.CancelTuningJobRequest], + Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + 
Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("GenAiTuningServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py new file mode 100644 index 0000000000..17349e1ccd --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc.py @@ -0,0 +1,572 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import genai_tuning_service +from google.cloud.aiplatform_v1.types import tuning_job +from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import GenAiTuningServiceTransport, DEFAULT_CLIENT_INFO + + +class GenAiTuningServiceGrpcTransport(GenAiTuningServiceTransport): + """gRPC backend transport for GenAiTuningService. + + A service for creating and managing GenAI Tuning Jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def create_tuning_job( + self, + ) -> Callable[ + [genai_tuning_service.CreateTuningJobRequest], gca_tuning_job.TuningJob + ]: + r"""Return a callable for the create tuning job method over gRPC. + + Creates a TuningJob. A created TuningJob right away + will be attempted to be run. + + Returns: + Callable[[~.CreateTuningJobRequest], + ~.TuningJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_tuning_job" not in self._stubs: + self._stubs["create_tuning_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiTuningService/CreateTuningJob", + request_serializer=genai_tuning_service.CreateTuningJobRequest.serialize, + response_deserializer=gca_tuning_job.TuningJob.deserialize, + ) + return self._stubs["create_tuning_job"] + + @property + def get_tuning_job( + self, + ) -> Callable[[genai_tuning_service.GetTuningJobRequest], tuning_job.TuningJob]: + r"""Return a callable for the get tuning job method over gRPC. + + Gets a TuningJob. + + Returns: + Callable[[~.GetTuningJobRequest], + ~.TuningJob]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_tuning_job" not in self._stubs: + self._stubs["get_tuning_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiTuningService/GetTuningJob", + request_serializer=genai_tuning_service.GetTuningJobRequest.serialize, + response_deserializer=tuning_job.TuningJob.deserialize, + ) + return self._stubs["get_tuning_job"] + + @property + def list_tuning_jobs( + self, + ) -> Callable[ + [genai_tuning_service.ListTuningJobsRequest], + genai_tuning_service.ListTuningJobsResponse, + ]: + r"""Return a callable for the list tuning jobs method over gRPC. + + Lists TuningJobs in a Location. + + Returns: + Callable[[~.ListTuningJobsRequest], + ~.ListTuningJobsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_tuning_jobs" not in self._stubs: + self._stubs["list_tuning_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiTuningService/ListTuningJobs", + request_serializer=genai_tuning_service.ListTuningJobsRequest.serialize, + response_deserializer=genai_tuning_service.ListTuningJobsResponse.deserialize, + ) + return self._stubs["list_tuning_jobs"] + + @property + def cancel_tuning_job( + self, + ) -> Callable[[genai_tuning_service.CancelTuningJobRequest], empty_pb2.Empty]: + r"""Return a callable for the cancel tuning job method over gRPC. + + Cancels a TuningJob. Starts asynchronous cancellation on the + TuningJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [GenAiTuningService.GetTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.GetTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the TuningJob is not deleted; instead it becomes a + job with a + [TuningJob.error][google.cloud.aiplatform.v1.TuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TuningJob.state][google.cloud.aiplatform.v1.TuningJob.state] is + set to ``CANCELLED``. + + Returns: + Callable[[~.CancelTuningJobRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_tuning_job" not in self._stubs: + self._stubs["cancel_tuning_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiTuningService/CancelTuningJob", + request_serializer=genai_tuning_service.CancelTuningJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["cancel_tuning_job"] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("GenAiTuningServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..281195f55d --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/grpc_asyncio.py @@ -0,0 +1,576 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import genai_tuning_service +from google.cloud.aiplatform_v1.types import tuning_job +from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import GenAiTuningServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import GenAiTuningServiceGrpcTransport + + +class GenAiTuningServiceGrpcAsyncIOTransport(GenAiTuningServiceTransport): + """gRPC AsyncIO backend transport for GenAiTuningService. + + A service for creating and managing GenAI Tuning Jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def create_tuning_job( + self, + ) -> Callable[ + [genai_tuning_service.CreateTuningJobRequest], + Awaitable[gca_tuning_job.TuningJob], + ]: + r"""Return a callable for the create tuning job method over gRPC. + + Creates a TuningJob. 
A created TuningJob right away + will be attempted to be run. + + Returns: + Callable[[~.CreateTuningJobRequest], + Awaitable[~.TuningJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_tuning_job" not in self._stubs: + self._stubs["create_tuning_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiTuningService/CreateTuningJob", + request_serializer=genai_tuning_service.CreateTuningJobRequest.serialize, + response_deserializer=gca_tuning_job.TuningJob.deserialize, + ) + return self._stubs["create_tuning_job"] + + @property + def get_tuning_job( + self, + ) -> Callable[ + [genai_tuning_service.GetTuningJobRequest], Awaitable[tuning_job.TuningJob] + ]: + r"""Return a callable for the get tuning job method over gRPC. + + Gets a TuningJob. + + Returns: + Callable[[~.GetTuningJobRequest], + Awaitable[~.TuningJob]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_tuning_job" not in self._stubs: + self._stubs["get_tuning_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiTuningService/GetTuningJob", + request_serializer=genai_tuning_service.GetTuningJobRequest.serialize, + response_deserializer=tuning_job.TuningJob.deserialize, + ) + return self._stubs["get_tuning_job"] + + @property + def list_tuning_jobs( + self, + ) -> Callable[ + [genai_tuning_service.ListTuningJobsRequest], + Awaitable[genai_tuning_service.ListTuningJobsResponse], + ]: + r"""Return a callable for the list tuning jobs method over gRPC. + + Lists TuningJobs in a Location. 
+ + Returns: + Callable[[~.ListTuningJobsRequest], + Awaitable[~.ListTuningJobsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_tuning_jobs" not in self._stubs: + self._stubs["list_tuning_jobs"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiTuningService/ListTuningJobs", + request_serializer=genai_tuning_service.ListTuningJobsRequest.serialize, + response_deserializer=genai_tuning_service.ListTuningJobsResponse.deserialize, + ) + return self._stubs["list_tuning_jobs"] + + @property + def cancel_tuning_job( + self, + ) -> Callable[ + [genai_tuning_service.CancelTuningJobRequest], Awaitable[empty_pb2.Empty] + ]: + r"""Return a callable for the cancel tuning job method over gRPC. + + Cancels a TuningJob. Starts asynchronous cancellation on the + TuningJob. The server makes a best effort to cancel the job, but + success is not guaranteed. Clients can use + [GenAiTuningService.GetTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.GetTuningJob] + or other methods to check whether the cancellation succeeded or + whether the job completed despite cancellation. On successful + cancellation, the TuningJob is not deleted; instead it becomes a + job with a + [TuningJob.error][google.cloud.aiplatform.v1.TuningJob.error] + value with a [google.rpc.Status.code][google.rpc.Status.code] of + 1, corresponding to ``Code.CANCELLED``, and + [TuningJob.state][google.cloud.aiplatform.v1.TuningJob.state] is + set to ``CANCELLED``. + + Returns: + Callable[[~.CancelTuningJobRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_tuning_job" not in self._stubs: + self._stubs["cancel_tuning_job"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.GenAiTuningService/CancelTuningJob", + request_serializer=genai_tuning_service.CancelTuningJobRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs["cancel_tuning_job"] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("GenAiTuningServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py new file mode 100644 index 0000000000..3e6c7b3984 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/gen_ai_tuning_service/transports/rest.py @@ -0,0 +1,3344 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +from google.cloud.aiplatform_v1.types import genai_tuning_service +from google.cloud.aiplatform_v1.types import tuning_job +from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job +from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +from .base import ( + GenAiTuningServiceTransport, + DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, +) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class GenAiTuningServiceRestInterceptor: + """Interceptor for GenAiTuningService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. 
+ Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the GenAiTuningServiceRestTransport. + + .. code-block:: python + class MyCustomGenAiTuningServiceInterceptor(GenAiTuningServiceRestInterceptor): + def pre_cancel_tuning_job(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_create_tuning_job(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_tuning_job(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_tuning_job(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_tuning_job(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_tuning_jobs(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_tuning_jobs(self, response): + logging.log(f"Received response: {response}") + return response + + transport = GenAiTuningServiceRestTransport(interceptor=MyCustomGenAiTuningServiceInterceptor()) + client = GenAiTuningServiceClient(transport=transport) + + + """ + + def pre_cancel_tuning_job( + self, + request: genai_tuning_service.CancelTuningJobRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[genai_tuning_service.CancelTuningJobRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_tuning_job + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. 
+ """ + return request, metadata + + def pre_create_tuning_job( + self, + request: genai_tuning_service.CreateTuningJobRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[genai_tuning_service.CreateTuningJobRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_tuning_job + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. + """ + return request, metadata + + def post_create_tuning_job( + self, response: gca_tuning_job.TuningJob + ) -> gca_tuning_job.TuningJob: + """Post-rpc interceptor for create_tuning_job + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. + """ + return response + + def pre_get_tuning_job( + self, + request: genai_tuning_service.GetTuningJobRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[genai_tuning_service.GetTuningJobRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_tuning_job + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. + """ + return request, metadata + + def post_get_tuning_job( + self, response: tuning_job.TuningJob + ) -> tuning_job.TuningJob: + """Post-rpc interceptor for get_tuning_job + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. + """ + return response + + def pre_list_tuning_jobs( + self, + request: genai_tuning_service.ListTuningJobsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[genai_tuning_service.ListTuningJobsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_tuning_jobs + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. 
+ """ + return request, metadata + + def post_list_tuning_jobs( + self, response: genai_tuning_service.ListTuningJobsResponse + ) -> genai_tuning_service.ListTuningJobsResponse: + """Post-rpc interceptor for list_tuning_jobs + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. + """ + return response + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. 
+ """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. + """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. + """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. 
+ """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. + """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. + """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. + """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. 
+ """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. + """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. + """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.WaitOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the GenAiTuningService server. 
+ """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the GenAiTuningService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class GenAiTuningServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: GenAiTuningServiceRestInterceptor + + +class GenAiTuningServiceRestTransport(GenAiTuningServiceTransport): + """REST backend transport for GenAiTuningService. + + A service for creating and managing GenAI Tuning Jobs. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via an issue in this + library's source repository. Thank you! + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[GenAiTuningServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via a GitHub issue in + this library's repository. Thank you! 
+ + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or GenAiTuningServiceRestInterceptor() + self._prep_wrapped_messages(client_info) + + class _CancelTuningJob(GenAiTuningServiceRestStub): + def __hash__(self): + return hash("CancelTuningJob") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: genai_tuning_service.CancelTuningJobRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ): + r"""Call the cancel tuning job method over HTTP. + + Args: + request (~.genai_tuning_service.CancelTuningJobRequest): + The request object. Request message for + [GenAiTuningService.CancelTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.CancelTuningJob]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}:cancel", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_cancel_tuning_job( + request, metadata + ) + pb_request = genai_tuning_service.CancelTuningJobRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _CreateTuningJob(GenAiTuningServiceRestStub): + def __hash__(self): + return hash("CreateTuningJob") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: genai_tuning_service.CreateTuningJobRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_tuning_job.TuningJob: + r"""Call the create tuning job method over HTTP. 
+ + Args: + request (~.genai_tuning_service.CreateTuningJobRequest): + The request object. Request message for + [GenAiTuningService.CreateTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.CreateTuningJob]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_tuning_job.TuningJob: + Represents a TuningJob that runs with + Google owned models. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{parent=projects/*/locations/*}/tuningJobs", + "body": "tuning_job", + }, + ] + request, metadata = self._interceptor.pre_create_tuning_job( + request, metadata + ) + pb_request = genai_tuning_service.CreateTuningJobRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gca_tuning_job.TuningJob() + pb_resp = gca_tuning_job.TuningJob.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_tuning_job(resp) + return resp + + class _GetTuningJob(GenAiTuningServiceRestStub): + def __hash__(self): + return hash("GetTuningJob") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: genai_tuning_service.GetTuningJobRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> tuning_job.TuningJob: + r"""Call the get tuning job method over HTTP. + + Args: + request (~.genai_tuning_service.GetTuningJobRequest): + The request object. Request message for + [GenAiTuningService.GetTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.GetTuningJob]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.tuning_job.TuningJob: + Represents a TuningJob that runs with + Google owned models. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}", + }, + ] + request, metadata = self._interceptor.pre_get_tuning_job(request, metadata) + pb_request = genai_tuning_service.GetTuningJobRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = tuning_job.TuningJob() + pb_resp = tuning_job.TuningJob.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_tuning_job(resp) + return resp + + class _ListTuningJobs(GenAiTuningServiceRestStub): + def __hash__(self): + return hash("ListTuningJobs") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: genai_tuning_service.ListTuningJobsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> genai_tuning_service.ListTuningJobsResponse: + r"""Call the list tuning jobs method over HTTP. + + Args: + request (~.genai_tuning_service.ListTuningJobsRequest): + The request object. Request message for + [GenAiTuningService.ListTuningJobs][google.cloud.aiplatform.v1.GenAiTuningService.ListTuningJobs]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.genai_tuning_service.ListTuningJobsResponse: + Response message for + [GenAiTuningService.ListTuningJobs][google.cloud.aiplatform.v1.GenAiTuningService.ListTuningJobs] + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/tuningJobs", + }, + ] + request, metadata = self._interceptor.pre_list_tuning_jobs( + request, metadata + ) + pb_request = genai_tuning_service.ListTuningJobsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = genai_tuning_service.ListTuningJobsResponse() + pb_resp = genai_tuning_service.ListTuningJobsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_tuning_jobs(resp) + return resp + + @property + def cancel_tuning_job( + self, + ) -> Callable[[genai_tuning_service.CancelTuningJobRequest], empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CancelTuningJob(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_tuning_job( + self, + ) -> Callable[ + [genai_tuning_service.CreateTuningJobRequest], gca_tuning_job.TuningJob + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateTuningJob(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_tuning_job( + self, + ) -> Callable[[genai_tuning_service.GetTuningJobRequest], tuning_job.TuningJob]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetTuningJob(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_tuning_jobs( + self, + ) -> Callable[ + [genai_tuning_service.ListTuningJobsRequest], + genai_tuning_service.ListTuningJobsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListTuningJobs(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation(GenAiTuningServiceRestStub): + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.Location: Response from GetLocation method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_location(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.Location() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_location(resp) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations(GenAiTuningServiceRestStub): + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + + r"""Call the list locations method over HTTP. 
+ + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*}/locations", + }, + ] + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_locations(resp) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy(GenAiTuningServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the get iam policy method over HTTP. + + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + ] + + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, 
uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy(GenAiTuningServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the set iam policy method over HTTP. + + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + ] + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = 
"application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions(GenAiTuningServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + ] + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + 
headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(GenAiTuningServiceRestStub): + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation(GenAiTuningServiceRestStub): + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { 
+ "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(GenAiTuningServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + 
{ + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + 
"method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = 
json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(GenAiTuningServiceRestStub): + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation(GenAiTuningServiceRestStub): + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the wait operation method over HTTP. + + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + 
"method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = 
transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_wait_operation(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("GenAiTuningServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py index ea14062f02..9b7a3e466f 100644 --- a/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/index_endpoint_service/transports/rest.py @@ -796,6 +796,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -936,6 +940,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1090,6 +1098,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1254,6 +1266,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1420,6 +1436,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1588,6 +1608,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1762,6 +1786,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -1864,7 +1892,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -1918,6 +1946,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2092,6 +2124,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2244,6 +2280,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3748,6 +3788,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", 
@@ -3888,6 +3932,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4099,6 +4147,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4263,6 +4315,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4489,6 +4545,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4657,6 +4717,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4892,6 +4956,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4994,7 +5062,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -5048,6 +5116,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5283,6 +5355,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5435,6 +5511,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/index_service/transports/rest.py b/google/cloud/aiplatform_v1/services/index_service/transports/rest.py index 080b51a548..83687ef1ef 100644 --- a/google/cloud/aiplatform_v1/services/index_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/index_service/transports/rest.py @@ -748,6 +748,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -888,6 +892,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1042,6 +1050,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1206,6 +1218,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1372,6 +1388,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1540,6 +1560,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1714,6 +1738,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -1816,7 +1844,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -1870,6 +1898,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2044,6 +2076,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2196,6 +2232,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3577,6 +3617,10 @@ def __call__( "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3717,6 +3761,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3928,6 +3976,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4092,6 +4144,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4318,6 +4374,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4486,6 +4546,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", 
@@ -4721,6 +4785,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4823,7 +4891,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -4877,6 +4945,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5112,6 +5184,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5264,6 +5340,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/job_service/async_client.py b/google/cloud/aiplatform_v1/services/job_service/async_client.py index 22208366e8..505ef38e2b 100644 --- a/google/cloud/aiplatform_v1/services/job_service/async_client.py +++ b/google/cloud/aiplatform_v1/services/job_service/async_client.py @@ -145,6 +145,10 @@ class JobServiceAsyncClient: parse_notification_channel_path = staticmethod( 
JobServiceClient.parse_notification_channel_path ) + persistent_resource_path = staticmethod(JobServiceClient.persistent_resource_path) + parse_persistent_resource_path = staticmethod( + JobServiceClient.parse_persistent_resource_path + ) tensorboard_path = staticmethod(JobServiceClient.tensorboard_path) parse_tensorboard_path = staticmethod(JobServiceClient.parse_tensorboard_path) trial_path = staticmethod(JobServiceClient.trial_path) diff --git a/google/cloud/aiplatform_v1/services/job_service/client.py b/google/cloud/aiplatform_v1/services/job_service/client.py index 65e0862483..ac78b7bd68 100644 --- a/google/cloud/aiplatform_v1/services/job_service/client.py +++ b/google/cloud/aiplatform_v1/services/job_service/client.py @@ -506,6 +506,28 @@ def parse_notification_channel_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def persistent_resource_path( + project: str, + location: str, + persistent_resource: str, + ) -> str: + """Returns a fully-qualified persistent_resource string.""" + return "projects/{project}/locations/{location}/persistentResources/{persistent_resource}".format( + project=project, + location=location, + persistent_resource=persistent_resource, + ) + + @staticmethod + def parse_persistent_resource_path(path: str) -> Dict[str, str]: + """Parses a persistent_resource path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/persistentResources/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def tensorboard_path( project: str, diff --git a/google/cloud/aiplatform_v1/services/job_service/transports/rest.py b/google/cloud/aiplatform_v1/services/job_service/transports/rest.py index ac11df0800..406d2bc13a 100644 --- a/google/cloud/aiplatform_v1/services/job_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/job_service/transports/rest.py @@ -1552,6 +1552,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: 
"method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1692,6 +1696,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1846,6 +1854,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2010,6 +2022,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2176,6 +2192,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2344,6 +2364,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2518,6 +2542,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2620,7 +2648,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2674,6 +2702,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2848,6 +2880,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3000,6 +3036,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + 
"method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -7198,6 +7238,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -7338,6 +7382,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -7549,6 +7597,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -7713,6 +7765,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -7939,6 +7995,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -8107,6 +8167,10 @@ def __call__( "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -8342,6 +8406,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -8444,7 +8512,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -8498,6 +8566,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -8733,6 +8805,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -8885,6 +8961,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest.py 
b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest.py index 0dfe36a347..8ab1e58035 100644 --- a/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/llm_utility_service/transports/rest.py @@ -1307,6 +1307,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1447,6 +1451,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1658,6 +1666,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1822,6 +1834,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2048,6 +2064,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2216,6 +2236,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2451,6 +2475,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2553,7 +2581,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2607,6 +2635,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2842,6 +2874,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2994,6 +3030,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git 
a/google/cloud/aiplatform_v1/services/match_service/transports/rest.py b/google/cloud/aiplatform_v1/services/match_service/transports/rest.py index d91f9e774f..8d5dff6fd4 100644 --- a/google/cloud/aiplatform_v1/services/match_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/match_service/transports/rest.py @@ -1299,6 +1299,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1439,6 +1443,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1650,6 +1658,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1814,6 +1826,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2040,6 +2056,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2208,6 +2228,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2443,6 +2467,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2545,7 +2573,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2599,6 +2627,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2834,6 +2866,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2986,6 +3022,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git 
a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py index b343a4463c..d1530c52aa 100644 --- a/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/metadata_service/transports/rest.py @@ -1538,6 +1538,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1678,6 +1682,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1832,6 +1840,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1996,6 +2008,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2162,6 +2178,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2330,6 +2350,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2504,6 +2528,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2606,7 +2634,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2660,6 +2688,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2834,6 +2866,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + 
{ + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2986,6 +3022,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -6858,6 +6898,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -6998,6 +7042,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -7209,6 +7257,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -7373,6 +7425,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -7599,6 +7655,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -7767,6 +7827,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -8002,6 +8066,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -8104,7 +8172,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -8158,6 +8226,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -8393,6 +8465,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -8545,6 +8621,10 @@ def __call__( 
"method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/migration_service/client.py b/google/cloud/aiplatform_v1/services/migration_service/client.py index c10de7aded..11e08352a5 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/client.py +++ b/google/cloud/aiplatform_v1/services/migration_service/client.py @@ -215,40 +215,40 @@ def parse_annotated_dataset_path(path: str) -> Dict[str, str]: @staticmethod def dataset_path( project: str, - location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/locations/{location}/datasets/{dataset}".format( + return "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match( - r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", - path, - ) + m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) return m.groupdict() if m else {} @staticmethod def dataset_path( project: str, + location: str, dataset: str, ) -> str: """Returns a fully-qualified dataset string.""" - return "projects/{project}/datasets/{dataset}".format( + return "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) @staticmethod def parse_dataset_path(path: str) -> Dict[str, str]: """Parses a dataset path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/datasets/(?P.+?)$", path) + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/datasets/(?P.+?)$", + path, + ) return m.groupdict() if m else {} 
@staticmethod diff --git a/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py b/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py index 8103e76b46..712ffde9a3 100644 --- a/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/migration_service/transports/rest.py @@ -601,6 +601,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -741,6 +745,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -895,6 +903,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1059,6 +1071,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1225,6 
+1241,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1393,6 +1413,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1567,6 +1591,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -1669,7 +1697,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -1723,6 +1751,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -1897,6 +1929,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2049,6 +2085,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2948,6 +2988,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3088,6 +3132,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3299,6 +3347,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3463,6 +3515,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + 
}, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -3689,6 +3745,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3857,6 +3917,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4092,6 +4156,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4194,7 +4262,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -4248,6 +4316,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4483,6 +4555,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ 
-4635,6 +4711,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py index c197fb3a1e..b2e7090a25 100644 --- a/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/model_garden_service/transports/rest.py @@ -1160,6 +1160,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1300,6 +1304,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1511,6 +1519,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1675,6 +1687,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1901,6 +1917,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2069,6 +2089,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2304,6 +2328,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2406,7 +2434,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2460,6 +2488,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2695,6 +2727,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2847,6 +2883,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/model_service/transports/rest.py b/google/cloud/aiplatform_v1/services/model_service/transports/rest.py index 0abf759cfa..e3a5942e5c 100644 --- a/google/cloud/aiplatform_v1/services/model_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/model_service/transports/rest.py @@ -1096,6 +1096,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1236,6 +1240,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1390,6 +1398,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1554,6 
+1566,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1720,6 +1736,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1888,6 +1908,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2062,6 +2086,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2164,7 +2192,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2218,6 +2246,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2392,6 +2424,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2544,6 +2580,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5043,6 +5083,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5183,6 +5227,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5394,6 +5442,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5558,6 +5610,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -5784,6 +5840,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5952,6 +6012,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -6187,6 +6251,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -6289,7 +6357,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -6343,6 +6411,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -6578,6 +6650,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -6730,6 +6806,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/notebook_service/__init__.py b/google/cloud/aiplatform_v1/services/notebook_service/__init__.py new file mode 100644 index 0000000000..4e038695c2 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/notebook_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import NotebookServiceClient +from .async_client import NotebookServiceAsyncClient + +__all__ = ( + "NotebookServiceClient", + "NotebookServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1/services/notebook_service/async_client.py b/google/cloud/aiplatform_v1/services/notebook_service/async_client.py new file mode 100644 index 0000000000..4193466b6b --- /dev/null +++ b/google/cloud/aiplatform_v1/services/notebook_service/async_client.py @@ -0,0 +1,2316 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.notebook_service import pagers +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import network_spec +from google.cloud.aiplatform_v1.types import notebook_euc_config +from google.cloud.aiplatform_v1.types import notebook_idle_shutdown_config +from google.cloud.aiplatform_v1.types import notebook_runtime +from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime +from google.cloud.aiplatform_v1.types import notebook_runtime_template_ref +from google.cloud.aiplatform_v1.types import notebook_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore 
from .transports.base import NotebookServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import NotebookServiceGrpcAsyncIOTransport
from .client import NotebookServiceClient


class NotebookServiceAsyncClient:
    """The interface for Vertex Notebook service (a.k.a. Colab on
    Workbench).

    This async wrapper delegates configuration, endpoint resolution and
    resource-path helpers to the synchronous
    :class:`NotebookServiceClient`; only the RPC methods are natively
    asynchronous.
    """

    # The wrapped synchronous client; holds transport/endpoint state.
    _client: NotebookServiceClient

    # Copy defaults from the synchronous client for use here.
    # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
    DEFAULT_ENDPOINT = NotebookServiceClient.DEFAULT_ENDPOINT
    DEFAULT_MTLS_ENDPOINT = NotebookServiceClient.DEFAULT_MTLS_ENDPOINT
    _DEFAULT_ENDPOINT_TEMPLATE = NotebookServiceClient._DEFAULT_ENDPOINT_TEMPLATE
    _DEFAULT_UNIVERSE = NotebookServiceClient._DEFAULT_UNIVERSE

    # Resource-path helpers are pure string functions; reuse the sync
    # client's implementations directly.
    network_path = staticmethod(NotebookServiceClient.network_path)
    parse_network_path = staticmethod(NotebookServiceClient.parse_network_path)
    notebook_runtime_path = staticmethod(NotebookServiceClient.notebook_runtime_path)
    parse_notebook_runtime_path = staticmethod(
        NotebookServiceClient.parse_notebook_runtime_path
    )
    notebook_runtime_template_path = staticmethod(
        NotebookServiceClient.notebook_runtime_template_path
    )
    parse_notebook_runtime_template_path = staticmethod(
        NotebookServiceClient.parse_notebook_runtime_template_path
    )
    subnetwork_path = staticmethod(NotebookServiceClient.subnetwork_path)
    parse_subnetwork_path = staticmethod(NotebookServiceClient.parse_subnetwork_path)
    common_billing_account_path = staticmethod(
        NotebookServiceClient.common_billing_account_path
    )
    parse_common_billing_account_path = staticmethod(
        NotebookServiceClient.parse_common_billing_account_path
    )
    common_folder_path = staticmethod(NotebookServiceClient.common_folder_path)
    parse_common_folder_path = staticmethod(
        NotebookServiceClient.parse_common_folder_path
    )
    common_organization_path = staticmethod(
        NotebookServiceClient.common_organization_path
    )
    parse_common_organization_path = staticmethod(
        NotebookServiceClient.parse_common_organization_path
    )
    common_project_path = staticmethod(NotebookServiceClient.common_project_path)
    parse_common_project_path = staticmethod(
        NotebookServiceClient.parse_common_project_path
    )
    common_location_path = staticmethod(NotebookServiceClient.common_location_path)
    parse_common_location_path = staticmethod(
        NotebookServiceClient.parse_common_location_path
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            NotebookServiceAsyncClient: The constructed client.
        """
        return NotebookServiceClient.from_service_account_info.__func__(NotebookServiceAsyncClient, info, *args, **kwargs)  # type: ignore

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            NotebookServiceAsyncClient: The constructed client.
        """
        return NotebookServiceClient.from_service_account_file.__func__(NotebookServiceAsyncClient, filename, *args, **kwargs)  # type: ignore

    # Alias kept for backward compatibility with older client surfaces.
    from_service_account_json = from_service_account_file

    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.

        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
        client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
        default client cert source exists, use the default one; otherwise the client cert
        source is None.

        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
        use the default API endpoint.

        More details can be found at https://google.aip.dev/auth/4114.

        Args:
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. Only the `api_endpoint` and `client_cert_source` properties may be used
                in this method.

        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
            client cert source to use.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        return NotebookServiceClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore

    @property
    def transport(self) -> NotebookServiceTransport:
        """Returns the transport used by the client instance.

        Returns:
            NotebookServiceTransport: The transport used by the client instance.
        """
        return self._client.transport

    @property
    def api_endpoint(self):
        """Return the API endpoint used by the client instance.

        Returns:
            str: The API endpoint used by the client instance.
        """
        return self._client._api_endpoint

    @property
    def universe_domain(self) -> str:
        """Return the universe domain used by the client instance.

        Returns:
            str: The universe domain used by the client instance.
        """
        return self._client._universe_domain

    get_transport_class = functools.partial(
        type(NotebookServiceClient).get_transport_class, type(NotebookServiceClient)
    )

    def __init__(
        self,
        *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, NotebookServiceTransport] = "grpc_asyncio",
        client_options: Optional[ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the notebook service async client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.NotebookServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
                NOTE: "rest" transport functionality is currently in a
                beta state (preview). We welcome your feedback via an
                issue in this library's source repository.
            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
                Custom options for the client.

                1. The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client when ``transport`` is
                not explicitly provided. Only if this property is not set and
                ``transport`` was not explicitly provided, the endpoint is
                determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
                variable, which has one of the following values:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto-switch to the
                default mTLS endpoint if client certificate is present; this is
                the default value).

                2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide a client certificate for mTLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.

                3. The ``universe_domain`` property can be used to override the
                default "googleapis.com" universe. Note that ``api_endpoint``
                property still takes precedence; and ``universe_domain`` is
                currently not supported for mTLS.

            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # All state lives on the wrapped synchronous client; async RPC
        # methods reach through it for the transport and universe checks.
        self._client = NotebookServiceClient(
            credentials=credentials,
            transport=transport,
            client_options=client_options,
            client_info=client_info,
        )
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_create_notebook_runtime_template(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + notebook_runtime_template = aiplatform_v1.NotebookRuntimeTemplate() + notebook_runtime_template.display_name = "display_name_value" + + request = aiplatform_v1.CreateNotebookRuntimeTemplateRequest( + parent="parent_value", + notebook_runtime_template=notebook_runtime_template, + ) + + # Make the request + operation = client.create_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.CreateNotebookRuntimeTemplateRequest, dict]]): + The request object. Request message for + [NotebookService.CreateNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.CreateNotebookRuntimeTemplate]. + parent (:class:`str`): + Required. The resource name of the Location to create + the NotebookRuntimeTemplate. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_template (:class:`google.cloud.aiplatform_v1.types.NotebookRuntimeTemplate`): + Required. The NotebookRuntimeTemplate + to create. + + This corresponds to the ``notebook_runtime_template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_template_id (:class:`str`): + Optional. User specified ID for the + notebook runtime template. 
+ + This corresponds to the ``notebook_runtime_template_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.NotebookRuntimeTemplate` A template that specifies runtime configurations such as machine type, + runtime version, network configurations, etc. + Multiple runtimes can be created from a runtime + template. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, notebook_runtime_template, notebook_runtime_template_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.CreateNotebookRuntimeTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if notebook_runtime_template is not None: + request.notebook_runtime_template = notebook_runtime_template + if notebook_runtime_template_id is not None: + request.notebook_runtime_template_id = notebook_runtime_template_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_notebook_runtime_template, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + notebook_runtime.NotebookRuntimeTemplate, + metadata_type=notebook_service.CreateNotebookRuntimeTemplateOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_notebook_runtime_template( + self, + request: Optional[ + Union[notebook_service.GetNotebookRuntimeTemplateRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_runtime.NotebookRuntimeTemplate: + r"""Gets a NotebookRuntimeTemplate. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_get_notebook_runtime_template(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + response = await client.get_notebook_runtime_template(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.GetNotebookRuntimeTemplateRequest, dict]]): + The request object. Request message for + [NotebookService.GetNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.GetNotebookRuntimeTemplate] + name (:class:`str`): + Required. The name of the NotebookRuntimeTemplate + resource. Format: + ``projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.NotebookRuntimeTemplate: + A template that specifies runtime + configurations such as machine type, + runtime version, network configurations, + etc. Multiple runtimes can be created + from a runtime template. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.GetNotebookRuntimeTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_notebook_runtime_template, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_notebook_runtime_templates( + self, + request: Optional[ + Union[notebook_service.ListNotebookRuntimeTemplatesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotebookRuntimeTemplatesAsyncPager: + r"""Lists NotebookRuntimeTemplates in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_list_notebook_runtime_templates(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListNotebookRuntimeTemplatesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtime_templates(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.ListNotebookRuntimeTemplatesRequest, dict]]): + The request object. Request message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimeTemplates]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the NotebookRuntimeTemplates. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.notebook_service.pagers.ListNotebookRuntimeTemplatesAsyncPager: + Response message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimeTemplates]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.ListNotebookRuntimeTemplatesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_notebook_runtime_templates, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListNotebookRuntimeTemplatesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_notebook_runtime_template( + self, + request: Optional[ + Union[notebook_service.DeleteNotebookRuntimeTemplateRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a NotebookRuntimeTemplate. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_delete_notebook_runtime_template(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.DeleteNotebookRuntimeTemplateRequest, dict]]): + The request object. Request message for + [NotebookService.DeleteNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntimeTemplate]. + name (:class:`str`): + Required. The name of the NotebookRuntimeTemplate + resource to be deleted. Format: + ``projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.DeleteNotebookRuntimeTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_notebook_runtime_template, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def assign_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.AssignNotebookRuntimeRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + notebook_runtime_template: Optional[str] = None, + notebook_runtime: Optional[gca_notebook_runtime.NotebookRuntime] = None, + notebook_runtime_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Assigns a NotebookRuntime to a user for a particular + Notebook file. This method will either returns an + existing assignment or generates a new one. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_assign_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + notebook_runtime = aiplatform_v1.NotebookRuntime() + notebook_runtime.runtime_user = "runtime_user_value" + notebook_runtime.display_name = "display_name_value" + + request = aiplatform_v1.AssignNotebookRuntimeRequest( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=notebook_runtime, + ) + + # Make the request + operation = client.assign_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request 
(Optional[Union[google.cloud.aiplatform_v1.types.AssignNotebookRuntimeRequest, dict]]): + The request object. Request message for + [NotebookService.AssignNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.AssignNotebookRuntime]. + parent (:class:`str`): + Required. The resource name of the Location to get the + NotebookRuntime assignment. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_template (:class:`str`): + Required. The resource name of the + NotebookRuntimeTemplate based on which a + NotebookRuntime will be assigned (reuse + or create a new one). + + This corresponds to the ``notebook_runtime_template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime (:class:`google.cloud.aiplatform_v1.types.NotebookRuntime`): + Required. Provide runtime specific + information (e.g. runtime owner, + notebook id) used for NotebookRuntime + assignment. + + This corresponds to the ``notebook_runtime`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_id (:class:`str`): + Optional. User specified ID for the + notebook runtime. + + This corresponds to the ``notebook_runtime_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.NotebookRuntime` A runtime is a virtual machine allocated to a particular user for a + particular Notebook file on temporary basis with + lifetime limited to 24 hours. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, notebook_runtime_template, notebook_runtime, notebook_runtime_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.AssignNotebookRuntimeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if notebook_runtime_template is not None: + request.notebook_runtime_template = notebook_runtime_template + if notebook_runtime is not None: + request.notebook_runtime = notebook_runtime + if notebook_runtime_id is not None: + request.notebook_runtime_id = notebook_runtime_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.assign_notebook_runtime, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_notebook_runtime.NotebookRuntime, + metadata_type=notebook_service.AssignNotebookRuntimeOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.GetNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_runtime.NotebookRuntime: + r"""Gets a NotebookRuntime. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_get_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + response = await client.get_notebook_runtime(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.GetNotebookRuntimeRequest, dict]]): + The request object. Request message for + [NotebookService.GetNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.GetNotebookRuntime] + name (:class:`str`): + Required. The name of the + NotebookRuntime resource. Instead of + checking whether the name is in valid + NotebookRuntime resource name format, + directly throw NotFound exception if + there is no such NotebookRuntime in + spanner. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.NotebookRuntime: + A runtime is a virtual machine + allocated to a particular user for a + particular Notebook file on temporary + basis with lifetime limited to 24 hours. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.GetNotebookRuntimeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_notebook_runtime, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_notebook_runtimes( + self, + request: Optional[ + Union[notebook_service.ListNotebookRuntimesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotebookRuntimesAsyncPager: + r"""Lists NotebookRuntimes in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_list_notebook_runtimes(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListNotebookRuntimesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtimes(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.ListNotebookRuntimesRequest, dict]]): + The request object. Request message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimes]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the NotebookRuntimes. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.notebook_service.pagers.ListNotebookRuntimesAsyncPager: + Response message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.ListNotebookRuntimesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_notebook_runtimes, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
+ response = pagers.ListNotebookRuntimesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.DeleteNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a NotebookRuntime. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_delete_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.DeleteNotebookRuntimeRequest, dict]]): + The request object. Request message for + [NotebookService.DeleteNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntime]. + name (:class:`str`): + Required. The name of the + NotebookRuntime resource to be deleted. 
+ Instead of checking whether the name is + in valid NotebookRuntime resource name + format, directly throw NotFound + exception if there is no such + NotebookRuntime in spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.DeleteNotebookRuntimeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_notebook_runtime, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def upgrade_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.UpgradeNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Upgrades a NotebookRuntime. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_upgrade_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpgradeNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.upgrade_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.UpgradeNotebookRuntimeRequest, dict]]): + The request object. Request message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.UpgradeNotebookRuntime]. + name (:class:`str`): + Required. The name of the + NotebookRuntime resource to be upgrade. + Instead of checking whether the name is + in valid NotebookRuntime resource name + format, directly throw NotFound + exception if there is no such + NotebookRuntime in spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.UpgradeNotebookRuntimeResponse` Response message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.UpgradeNotebookRuntime]. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.UpgradeNotebookRuntimeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.upgrade_notebook_runtime, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + notebook_service.UpgradeNotebookRuntimeResponse, + metadata_type=notebook_service.UpgradeNotebookRuntimeOperationMetadata, + ) + + # Done; return the response. + return response + + async def start_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.StartNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Starts a NotebookRuntime. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_start_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.StartNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.start_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.StartNotebookRuntimeRequest, dict]]): + The request object. Request message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.StartNotebookRuntime]. + name (:class:`str`): + Required. The name of the + NotebookRuntime resource to be started. + Instead of checking whether the name is + in valid NotebookRuntime resource name + format, directly throw NotFound + exception if there is no such + NotebookRuntime in spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.StartNotebookRuntimeResponse` Response message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.StartNotebookRuntime]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.StartNotebookRuntimeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.start_notebook_runtime, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + notebook_service.StartNotebookRuntimeResponse, + metadata_type=notebook_service.StartNotebookRuntimeOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def __aenter__(self) -> "NotebookServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("NotebookServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/notebook_service/client.py b/google/cloud/aiplatform_v1/services/notebook_service/client.py new file mode 100644 index 0000000000..2131105155 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/notebook_service/client.py @@ -0,0 +1,2801 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.notebook_service import pagers +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import network_spec +from google.cloud.aiplatform_v1.types import notebook_euc_config +from google.cloud.aiplatform_v1.types import notebook_idle_shutdown_config +from google.cloud.aiplatform_v1.types import notebook_runtime +from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime +from google.cloud.aiplatform_v1.types import notebook_runtime_template_ref +from google.cloud.aiplatform_v1.types import notebook_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 
import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import NotebookServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import NotebookServiceGrpcTransport +from .transports.grpc_asyncio import NotebookServiceGrpcAsyncIOTransport +from .transports.rest import NotebookServiceRestTransport + + +class NotebookServiceClientMeta(type): + """Metaclass for the NotebookService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[NotebookServiceTransport]] + _transport_registry["grpc"] = NotebookServiceGrpcTransport + _transport_registry["grpc_asyncio"] = NotebookServiceGrpcAsyncIOTransport + _transport_registry["rest"] = NotebookServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[NotebookServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class NotebookServiceClient(metaclass=NotebookServiceClientMeta): + """The interface for Vertex Notebook service (a.k.a. Colab on + Workbench). + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. 
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}"
+    _DEFAULT_UNIVERSE = "googleapis.com"
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            NotebookServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            NotebookServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> NotebookServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            NotebookServiceTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def network_path(
+        project: str,
+        network: str,
+    ) -> str:
+        """Returns a fully-qualified network string."""
+        return "projects/{project}/global/networks/{network}".format(
+            project=project,
+            network=network,
+        )
+
+    @staticmethod
+    def parse_network_path(path: str) -> Dict[str, str]:
+        """Parses a network path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def notebook_runtime_path(
+        project: str,
+        location: str,
+        notebook_runtime: str,
+    ) -> str:
+        """Returns a fully-qualified notebook_runtime string."""
+        return "projects/{project}/locations/{location}/notebookRuntimes/{notebook_runtime}".format(
+            project=project,
+            location=location,
+            notebook_runtime=notebook_runtime,
+        )
+
+    @staticmethod
+    def parse_notebook_runtime_path(path: str) -> Dict[str, str]:
+        """Parses a notebook_runtime path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/notebookRuntimes/(?P<notebook_runtime>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def notebook_runtime_template_path(
+        project: str,
+        location: str,
+        notebook_runtime_template: str,
+    ) -> str:
+        """Returns a fully-qualified notebook_runtime_template string."""
+        return "projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}".format(
+            project=project,
+            location=location,
+
notebook_runtime_template=notebook_runtime_template,
+        )
+
+    @staticmethod
+    def parse_notebook_runtime_template_path(path: str) -> Dict[str, str]:
+        """Parses a notebook_runtime_template path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/notebookRuntimeTemplates/(?P<notebook_runtime_template>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def subnetwork_path(
+        project: str,
+        region: str,
+        subnetwork: str,
+    ) -> str:
+        """Returns a fully-qualified subnetwork string."""
+        return "projects/{project}/regions/{region}/subnetworks/{subnetwork}".format(
+            project=project,
+            region=region,
+            subnetwork=subnetwork,
+        )
+
+    @staticmethod
+    def parse_subnetwork_path(path: str) -> Dict[str, str]:
+        """Parses a subnetwork path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/regions/(?P<region>.+?)/subnetworks/(?P<subnetwork>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+
organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[client_options_lib.ClientOptions] = None
+    ):
+        """Deprecated. Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. + """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. 
+ """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = NotebookServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = NotebookServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = NotebookServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. 
+ + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = NotebookServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. + + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = NotebookServiceClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. 
+ """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or NotebookServiceClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, NotebookServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the notebook service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, NotebookServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. 
Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = NotebookServiceClient._read_environment_variables() + self._client_cert_source = NotebookServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = NotebookServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, NotebookServiceTransport) + if transport_provided: + # transport is a NotebookServiceTransport instance. + if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = cast(NotebookServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or NotebookServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(cast(str, transport)) + self._transport = Transport( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + def create_notebook_runtime_template( + self, + request: Optional[ + Union[notebook_service.CreateNotebookRuntimeTemplateRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + notebook_runtime_template: Optional[ + notebook_runtime.NotebookRuntimeTemplate + ] = None, + notebook_runtime_template_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a NotebookRuntimeTemplate. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_create_notebook_runtime_template(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + notebook_runtime_template = aiplatform_v1.NotebookRuntimeTemplate() + notebook_runtime_template.display_name = "display_name_value" + + request = aiplatform_v1.CreateNotebookRuntimeTemplateRequest( + parent="parent_value", + notebook_runtime_template=notebook_runtime_template, + ) + + # Make the request + operation = client.create_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreateNotebookRuntimeTemplateRequest, dict]): + The request object. Request message for + [NotebookService.CreateNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.CreateNotebookRuntimeTemplate]. + parent (str): + Required. The resource name of the Location to create + the NotebookRuntimeTemplate. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_template (google.cloud.aiplatform_v1.types.NotebookRuntimeTemplate): + Required. The NotebookRuntimeTemplate + to create. + + This corresponds to the ``notebook_runtime_template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_template_id (str): + Optional. User specified ID for the + notebook runtime template. + + This corresponds to the ``notebook_runtime_template_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.NotebookRuntimeTemplate` A template that specifies runtime configurations such as machine type, + runtime version, network configurations, etc. + Multiple runtimes can be created from a runtime + template. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, notebook_runtime_template, notebook_runtime_template_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.CreateNotebookRuntimeTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notebook_service.CreateNotebookRuntimeTemplateRequest + ): + request = notebook_service.CreateNotebookRuntimeTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if notebook_runtime_template is not None: + request.notebook_runtime_template = notebook_runtime_template + if notebook_runtime_template_id is not None: + request.notebook_runtime_template_id = notebook_runtime_template_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[ + self._transport.create_notebook_runtime_template + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + notebook_runtime.NotebookRuntimeTemplate, + metadata_type=notebook_service.CreateNotebookRuntimeTemplateOperationMetadata, + ) + + # Done; return the response. + return response + + def get_notebook_runtime_template( + self, + request: Optional[ + Union[notebook_service.GetNotebookRuntimeTemplateRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_runtime.NotebookRuntimeTemplate: + r"""Gets a NotebookRuntimeTemplate. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_get_notebook_runtime_template(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + response = client.get_notebook_runtime_template(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetNotebookRuntimeTemplateRequest, dict]): + The request object. Request message for + [NotebookService.GetNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.GetNotebookRuntimeTemplate] + name (str): + Required. The name of the NotebookRuntimeTemplate + resource. Format: + ``projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.NotebookRuntimeTemplate: + A template that specifies runtime + configurations such as machine type, + runtime version, network configurations, + etc. Multiple runtimes can be created + from a runtime template. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.GetNotebookRuntimeTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.GetNotebookRuntimeTemplateRequest): + request = notebook_service.GetNotebookRuntimeTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.get_notebook_runtime_template + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_notebook_runtime_templates( + self, + request: Optional[ + Union[notebook_service.ListNotebookRuntimeTemplatesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotebookRuntimeTemplatesPager: + r"""Lists NotebookRuntimeTemplates in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_list_notebook_runtime_templates(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListNotebookRuntimeTemplatesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtime_templates(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListNotebookRuntimeTemplatesRequest, dict]): + The request object. Request message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimeTemplates]. + parent (str): + Required. The resource name of the Location from which + to list the NotebookRuntimeTemplates. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.notebook_service.pagers.ListNotebookRuntimeTemplatesPager: + Response message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimeTemplates]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.ListNotebookRuntimeTemplatesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notebook_service.ListNotebookRuntimeTemplatesRequest + ): + request = notebook_service.ListNotebookRuntimeTemplatesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_notebook_runtime_templates + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListNotebookRuntimeTemplatesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_notebook_runtime_template( + self, + request: Optional[ + Union[notebook_service.DeleteNotebookRuntimeTemplateRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a NotebookRuntimeTemplate. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_delete_notebook_runtime_template(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteNotebookRuntimeTemplateRequest, dict]): + The request object. Request message for + [NotebookService.DeleteNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntimeTemplate]. + name (str): + Required. The name of the NotebookRuntimeTemplate + resource to be deleted. Format: + ``projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.DeleteNotebookRuntimeTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notebook_service.DeleteNotebookRuntimeTemplateRequest + ): + request = notebook_service.DeleteNotebookRuntimeTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_notebook_runtime_template + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def assign_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.AssignNotebookRuntimeRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + notebook_runtime_template: Optional[str] = None, + notebook_runtime: Optional[gca_notebook_runtime.NotebookRuntime] = None, + notebook_runtime_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Assigns a NotebookRuntime to a user for a particular + Notebook file. This method will either returns an + existing assignment or generates a new one. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_assign_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + notebook_runtime = aiplatform_v1.NotebookRuntime() + notebook_runtime.runtime_user = "runtime_user_value" + notebook_runtime.display_name = "display_name_value" + + request = aiplatform_v1.AssignNotebookRuntimeRequest( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=notebook_runtime, + ) + + # Make the request + operation = client.assign_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.AssignNotebookRuntimeRequest, dict]): + The request object. Request message for + [NotebookService.AssignNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.AssignNotebookRuntime]. + parent (str): + Required. The resource name of the Location to get the + NotebookRuntime assignment. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_template (str): + Required. The resource name of the + NotebookRuntimeTemplate based on which a + NotebookRuntime will be assigned (reuse + or create a new one). + + This corresponds to the ``notebook_runtime_template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime (google.cloud.aiplatform_v1.types.NotebookRuntime): + Required. Provide runtime specific + information (e.g. runtime owner, + notebook id) used for NotebookRuntime + assignment. 
+ + This corresponds to the ``notebook_runtime`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_id (str): + Optional. User specified ID for the + notebook runtime. + + This corresponds to the ``notebook_runtime_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.NotebookRuntime` A runtime is a virtual machine allocated to a particular user for a + particular Notebook file on temporary basis with + lifetime limited to 24 hours. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, notebook_runtime_template, notebook_runtime, notebook_runtime_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.AssignNotebookRuntimeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.AssignNotebookRuntimeRequest): + request = notebook_service.AssignNotebookRuntimeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if notebook_runtime_template is not None: + request.notebook_runtime_template = notebook_runtime_template + if notebook_runtime is not None: + request.notebook_runtime = notebook_runtime + if notebook_runtime_id is not None: + request.notebook_runtime_id = notebook_runtime_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.assign_notebook_runtime] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_notebook_runtime.NotebookRuntime, + metadata_type=notebook_service.AssignNotebookRuntimeOperationMetadata, + ) + + # Done; return the response. + return response + + def get_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.GetNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_runtime.NotebookRuntime: + r"""Gets a NotebookRuntime. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_get_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + response = client.get_notebook_runtime(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetNotebookRuntimeRequest, dict]): + The request object. Request message for + [NotebookService.GetNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.GetNotebookRuntime] + name (str): + Required. The name of the + NotebookRuntime resource. Instead of + checking whether the name is in valid + NotebookRuntime resource name format, + directly throw NotFound exception if + there is no such NotebookRuntime in + spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.NotebookRuntime: + A runtime is a virtual machine + allocated to a particular user for a + particular Notebook file on temporary + basis with lifetime limited to 24 hours. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.GetNotebookRuntimeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.GetNotebookRuntimeRequest): + request = notebook_service.GetNotebookRuntimeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_notebook_runtime] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_notebook_runtimes( + self, + request: Optional[ + Union[notebook_service.ListNotebookRuntimesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotebookRuntimesPager: + r"""Lists NotebookRuntimes in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_list_notebook_runtimes(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListNotebookRuntimesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtimes(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListNotebookRuntimesRequest, dict]): + The request object. Request message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimes]. + parent (str): + Required. The resource name of the Location from which + to list the NotebookRuntimes. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.notebook_service.pagers.ListNotebookRuntimesPager: + Response message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.ListNotebookRuntimesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.ListNotebookRuntimesRequest): + request = notebook_service.ListNotebookRuntimesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_notebook_runtimes] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListNotebookRuntimesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.DeleteNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a NotebookRuntime. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_delete_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeleteNotebookRuntimeRequest, dict]): + The request object. Request message for + [NotebookService.DeleteNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntime]. + name (str): + Required. The name of the + NotebookRuntime resource to be deleted. + Instead of checking whether the name is + in valid NotebookRuntime resource name + format, directly throw NotFound + exception if there is no such + NotebookRuntime in spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.DeleteNotebookRuntimeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.DeleteNotebookRuntimeRequest): + request = notebook_service.DeleteNotebookRuntimeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_notebook_runtime] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def upgrade_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.UpgradeNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Upgrades a NotebookRuntime. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_upgrade_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpgradeNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.upgrade_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpgradeNotebookRuntimeRequest, dict]): + The request object. Request message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.UpgradeNotebookRuntime]. + name (str): + Required. The name of the + NotebookRuntime resource to be upgrade. 
+ Instead of checking whether the name is + in valid NotebookRuntime resource name + format, directly throw NotFound + exception if there is no such + NotebookRuntime in spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.UpgradeNotebookRuntimeResponse` Response message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.UpgradeNotebookRuntime]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.UpgradeNotebookRuntimeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.UpgradeNotebookRuntimeRequest): + request = notebook_service.UpgradeNotebookRuntimeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.upgrade_notebook_runtime] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + notebook_service.UpgradeNotebookRuntimeResponse, + metadata_type=notebook_service.UpgradeNotebookRuntimeOperationMetadata, + ) + + # Done; return the response. + return response + + def start_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.StartNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Starts a NotebookRuntime. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_start_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.StartNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.start_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.StartNotebookRuntimeRequest, dict]): + The request object. Request message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.StartNotebookRuntime]. + name (str): + Required. The name of the + NotebookRuntime resource to be started. + Instead of checking whether the name is + in valid NotebookRuntime resource name + format, directly throw NotFound + exception if there is no such + NotebookRuntime in spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.StartNotebookRuntimeResponse` Response message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.StartNotebookRuntime]. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.StartNotebookRuntimeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.StartNotebookRuntimeRequest): + request = notebook_service.StartNotebookRuntimeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.start_notebook_runtime] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + notebook_service.StartNotebookRuntimeResponse, + metadata_type=notebook_service.StartNotebookRuntimeOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "NotebookServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! 
Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("NotebookServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/notebook_service/pagers.py b/google/cloud/aiplatform_v1/services/notebook_service/pagers.py new file mode 100644 index 0000000000..cae91c1715 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/notebook_service/pagers.py @@ -0,0 +1,290 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, +) + +from google.cloud.aiplatform_v1.types import notebook_runtime +from google.cloud.aiplatform_v1.types import notebook_service + + +class ListNotebookRuntimeTemplatesPager: + """A pager for iterating through ``list_notebook_runtime_templates`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListNotebookRuntimeTemplatesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``notebook_runtime_templates`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListNotebookRuntimeTemplates`` requests and continue to iterate + through the ``notebook_runtime_templates`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1.types.ListNotebookRuntimeTemplatesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., notebook_service.ListNotebookRuntimeTemplatesResponse], + request: notebook_service.ListNotebookRuntimeTemplatesRequest, + response: notebook_service.ListNotebookRuntimeTemplatesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListNotebookRuntimeTemplatesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListNotebookRuntimeTemplatesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = notebook_service.ListNotebookRuntimeTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[notebook_service.ListNotebookRuntimeTemplatesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[notebook_runtime.NotebookRuntimeTemplate]: + for page in self.pages: + yield from page.notebook_runtime_templates + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListNotebookRuntimeTemplatesAsyncPager: + """A pager for iterating through ``list_notebook_runtime_templates`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListNotebookRuntimeTemplatesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``notebook_runtime_templates`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListNotebookRuntimeTemplates`` requests and continue to iterate + through the ``notebook_runtime_templates`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListNotebookRuntimeTemplatesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[notebook_service.ListNotebookRuntimeTemplatesResponse] + ], + request: notebook_service.ListNotebookRuntimeTemplatesRequest, + response: notebook_service.ListNotebookRuntimeTemplatesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListNotebookRuntimeTemplatesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListNotebookRuntimeTemplatesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = notebook_service.ListNotebookRuntimeTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[notebook_service.ListNotebookRuntimeTemplatesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[notebook_runtime.NotebookRuntimeTemplate]: + async def async_generator(): + async for page in self.pages: + for response in page.notebook_runtime_templates: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListNotebookRuntimesPager: + """A pager for iterating through ``list_notebook_runtimes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListNotebookRuntimesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``notebook_runtimes`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListNotebookRuntimes`` requests and continue to iterate + through the ``notebook_runtimes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListNotebookRuntimesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., notebook_service.ListNotebookRuntimesResponse], + request: notebook_service.ListNotebookRuntimesRequest, + response: notebook_service.ListNotebookRuntimesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListNotebookRuntimesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListNotebookRuntimesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = notebook_service.ListNotebookRuntimesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[notebook_service.ListNotebookRuntimesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[notebook_runtime.NotebookRuntime]: + for page in self.pages: + yield from page.notebook_runtimes + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListNotebookRuntimesAsyncPager: + """A pager for iterating through ``list_notebook_runtimes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListNotebookRuntimesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``notebook_runtimes`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListNotebookRuntimes`` requests and continue to iterate + through the ``notebook_runtimes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListNotebookRuntimesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[notebook_service.ListNotebookRuntimesResponse]], + request: notebook_service.ListNotebookRuntimesRequest, + response: notebook_service.ListNotebookRuntimesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListNotebookRuntimesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListNotebookRuntimesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = notebook_service.ListNotebookRuntimesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[notebook_service.ListNotebookRuntimesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[notebook_runtime.NotebookRuntime]: + async def async_generator(): + async for page in self.pages: + for response in page.notebook_runtimes: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/__init__.py new file mode 100644 index 0000000000..eedf575a7f --- /dev/null +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import NotebookServiceTransport +from .grpc import NotebookServiceGrpcTransport +from .grpc_asyncio import NotebookServiceGrpcAsyncIOTransport +from .rest import NotebookServiceRestTransport +from .rest import NotebookServiceRestInterceptor + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[NotebookServiceTransport]] +_transport_registry["grpc"] = NotebookServiceGrpcTransport +_transport_registry["grpc_asyncio"] = NotebookServiceGrpcAsyncIOTransport +_transport_registry["rest"] = NotebookServiceRestTransport + +__all__ = ( + "NotebookServiceTransport", + "NotebookServiceGrpcTransport", + "NotebookServiceGrpcAsyncIOTransport", + "NotebookServiceRestTransport", + "NotebookServiceRestInterceptor", +) diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py new file mode 100644 index 0000000000..4f2350b34d --- /dev/null +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/base.py @@ -0,0 +1,402 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import notebook_runtime +from google.cloud.aiplatform_v1.types import notebook_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class NotebookServiceTransport(abc.ABC): + """Abstract transport class for NotebookService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_notebook_runtime_template: gapic_v1.method.wrap_method( + self.create_notebook_runtime_template, + default_timeout=None, + client_info=client_info, + ), + self.get_notebook_runtime_template: gapic_v1.method.wrap_method( + self.get_notebook_runtime_template, + default_timeout=None, + client_info=client_info, + ), + self.list_notebook_runtime_templates: gapic_v1.method.wrap_method( + self.list_notebook_runtime_templates, + default_timeout=None, + client_info=client_info, + ), + self.delete_notebook_runtime_template: gapic_v1.method.wrap_method( + self.delete_notebook_runtime_template, + default_timeout=None, + client_info=client_info, + ), + self.assign_notebook_runtime: gapic_v1.method.wrap_method( + self.assign_notebook_runtime, + default_timeout=None, + client_info=client_info, + ), + self.get_notebook_runtime: gapic_v1.method.wrap_method( + self.get_notebook_runtime, + default_timeout=None, + client_info=client_info, + ), + self.list_notebook_runtimes: gapic_v1.method.wrap_method( + self.list_notebook_runtimes, + default_timeout=None, + client_info=client_info, + ), + self.delete_notebook_runtime: gapic_v1.method.wrap_method( + self.delete_notebook_runtime, + default_timeout=None, + client_info=client_info, + ), + self.upgrade_notebook_runtime: gapic_v1.method.wrap_method( + self.upgrade_notebook_runtime, + default_timeout=None, + client_info=client_info, + ), + 
self.start_notebook_runtime: gapic_v1.method.wrap_method( + self.start_notebook_runtime, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.CreateNotebookRuntimeTemplateRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeTemplateRequest], + Union[ + notebook_runtime.NotebookRuntimeTemplate, + Awaitable[notebook_runtime.NotebookRuntimeTemplate], + ], + ]: + raise NotImplementedError() + + @property + def list_notebook_runtime_templates( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimeTemplatesRequest], + Union[ + notebook_service.ListNotebookRuntimeTemplatesResponse, + Awaitable[notebook_service.ListNotebookRuntimeTemplatesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeTemplateRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def assign_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.AssignNotebookRuntimeRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeRequest], + Union[ + 
notebook_runtime.NotebookRuntime, + Awaitable[notebook_runtime.NotebookRuntime], + ], + ]: + raise NotImplementedError() + + @property + def list_notebook_runtimes( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimesRequest], + Union[ + notebook_service.ListNotebookRuntimesResponse, + Awaitable[notebook_service.ListNotebookRuntimesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def upgrade_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.UpgradeNotebookRuntimeRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def start_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.StartNotebookRuntimeRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + 
raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("NotebookServiceTransport",) diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc.py new file mode 100644 index 0000000000..337a95d69c --- /dev/null +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc.py @@ -0,0 +1,763 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import notebook_runtime +from google.cloud.aiplatform_v1.types import notebook_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import NotebookServiceTransport, DEFAULT_CLIENT_INFO + + +class NotebookServiceGrpcTransport(NotebookServiceTransport): + """gRPC backend transport for NotebookService. + + The interface for Vertex Notebook service (a.k.a. Colab on + Workbench). + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. 
+ self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.CreateNotebookRuntimeTemplateRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the create notebook runtime + template method over gRPC. + + Creates a NotebookRuntimeTemplate. + + Returns: + Callable[[~.CreateNotebookRuntimeTemplateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_notebook_runtime_template" not in self._stubs: + self._stubs[ + "create_notebook_runtime_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/CreateNotebookRuntimeTemplate", + request_serializer=notebook_service.CreateNotebookRuntimeTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_notebook_runtime_template"] + + @property + def get_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeTemplateRequest], + notebook_runtime.NotebookRuntimeTemplate, + ]: + r"""Return a callable for the get notebook runtime template method over gRPC. + + Gets a NotebookRuntimeTemplate. + + Returns: + Callable[[~.GetNotebookRuntimeTemplateRequest], + ~.NotebookRuntimeTemplate]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_notebook_runtime_template" not in self._stubs: + self._stubs[ + "get_notebook_runtime_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/GetNotebookRuntimeTemplate", + request_serializer=notebook_service.GetNotebookRuntimeTemplateRequest.serialize, + response_deserializer=notebook_runtime.NotebookRuntimeTemplate.deserialize, + ) + return self._stubs["get_notebook_runtime_template"] + + @property + def list_notebook_runtime_templates( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimeTemplatesRequest], + notebook_service.ListNotebookRuntimeTemplatesResponse, + ]: + r"""Return a callable for the list notebook runtime + templates method over gRPC. + + Lists NotebookRuntimeTemplates in a Location. 
+ + Returns: + Callable[[~.ListNotebookRuntimeTemplatesRequest], + ~.ListNotebookRuntimeTemplatesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_notebook_runtime_templates" not in self._stubs: + self._stubs[ + "list_notebook_runtime_templates" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/ListNotebookRuntimeTemplates", + request_serializer=notebook_service.ListNotebookRuntimeTemplatesRequest.serialize, + response_deserializer=notebook_service.ListNotebookRuntimeTemplatesResponse.deserialize, + ) + return self._stubs["list_notebook_runtime_templates"] + + @property + def delete_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeTemplateRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the delete notebook runtime + template method over gRPC. + + Deletes a NotebookRuntimeTemplate. + + Returns: + Callable[[~.DeleteNotebookRuntimeTemplateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_notebook_runtime_template" not in self._stubs: + self._stubs[ + "delete_notebook_runtime_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookRuntimeTemplate", + request_serializer=notebook_service.DeleteNotebookRuntimeTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_notebook_runtime_template"] + + @property + def assign_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.AssignNotebookRuntimeRequest], operations_pb2.Operation + ]: + r"""Return a callable for the assign notebook runtime method over gRPC. + + Assigns a NotebookRuntime to a user for a particular + Notebook file. This method will either returns an + existing assignment or generates a new one. + + Returns: + Callable[[~.AssignNotebookRuntimeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "assign_notebook_runtime" not in self._stubs: + self._stubs["assign_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/AssignNotebookRuntime", + request_serializer=notebook_service.AssignNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["assign_notebook_runtime"] + + @property + def get_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeRequest], notebook_runtime.NotebookRuntime + ]: + r"""Return a callable for the get notebook runtime method over gRPC. + + Gets a NotebookRuntime. + + Returns: + Callable[[~.GetNotebookRuntimeRequest], + ~.NotebookRuntime]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_notebook_runtime" not in self._stubs: + self._stubs["get_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/GetNotebookRuntime", + request_serializer=notebook_service.GetNotebookRuntimeRequest.serialize, + response_deserializer=notebook_runtime.NotebookRuntime.deserialize, + ) + return self._stubs["get_notebook_runtime"] + + @property + def list_notebook_runtimes( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimesRequest], + notebook_service.ListNotebookRuntimesResponse, + ]: + r"""Return a callable for the list notebook runtimes method over gRPC. + + Lists NotebookRuntimes in a Location. + + Returns: + Callable[[~.ListNotebookRuntimesRequest], + ~.ListNotebookRuntimesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_notebook_runtimes" not in self._stubs: + self._stubs["list_notebook_runtimes"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/ListNotebookRuntimes", + request_serializer=notebook_service.ListNotebookRuntimesRequest.serialize, + response_deserializer=notebook_service.ListNotebookRuntimesResponse.deserialize, + ) + return self._stubs["list_notebook_runtimes"] + + @property + def delete_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete notebook runtime method over gRPC. + + Deletes a NotebookRuntime. 
+ + Returns: + Callable[[~.DeleteNotebookRuntimeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_notebook_runtime" not in self._stubs: + self._stubs["delete_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookRuntime", + request_serializer=notebook_service.DeleteNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_notebook_runtime"] + + @property + def upgrade_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.UpgradeNotebookRuntimeRequest], operations_pb2.Operation + ]: + r"""Return a callable for the upgrade notebook runtime method over gRPC. + + Upgrades a NotebookRuntime. + + Returns: + Callable[[~.UpgradeNotebookRuntimeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "upgrade_notebook_runtime" not in self._stubs: + self._stubs["upgrade_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/UpgradeNotebookRuntime", + request_serializer=notebook_service.UpgradeNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["upgrade_notebook_runtime"] + + @property + def start_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.StartNotebookRuntimeRequest], operations_pb2.Operation + ]: + r"""Return a callable for the start notebook runtime method over gRPC. + + Starts a NotebookRuntime. 
+ + Returns: + Callable[[~.StartNotebookRuntimeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "start_notebook_runtime" not in self._stubs: + self._stubs["start_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/StartNotebookRuntime", + request_serializer=notebook_service.StartNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["start_notebook_runtime"] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("NotebookServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..02593589cc --- /dev/null +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/grpc_asyncio.py @@ -0,0 +1,769 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import notebook_runtime +from google.cloud.aiplatform_v1.types import notebook_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import NotebookServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import NotebookServiceGrpcTransport + + +class NotebookServiceGrpcAsyncIOTransport(NotebookServiceTransport): + """gRPC AsyncIO backend transport for NotebookService. + + The interface for Vertex Notebook service (a.k.a. Colab on + Workbench). + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. 
+ """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.CreateNotebookRuntimeTemplateRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create notebook runtime + template method over gRPC. + + Creates a NotebookRuntimeTemplate. + + Returns: + Callable[[~.CreateNotebookRuntimeTemplateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_notebook_runtime_template" not in self._stubs: + self._stubs[ + "create_notebook_runtime_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/CreateNotebookRuntimeTemplate", + request_serializer=notebook_service.CreateNotebookRuntimeTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_notebook_runtime_template"] + + @property + def get_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeTemplateRequest], + Awaitable[notebook_runtime.NotebookRuntimeTemplate], + ]: + r"""Return a callable for the get notebook runtime template method over gRPC. + + Gets a NotebookRuntimeTemplate. + + Returns: + Callable[[~.GetNotebookRuntimeTemplateRequest], + Awaitable[~.NotebookRuntimeTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_notebook_runtime_template" not in self._stubs: + self._stubs[ + "get_notebook_runtime_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/GetNotebookRuntimeTemplate", + request_serializer=notebook_service.GetNotebookRuntimeTemplateRequest.serialize, + response_deserializer=notebook_runtime.NotebookRuntimeTemplate.deserialize, + ) + return self._stubs["get_notebook_runtime_template"] + + @property + def list_notebook_runtime_templates( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimeTemplatesRequest], + Awaitable[notebook_service.ListNotebookRuntimeTemplatesResponse], + ]: + r"""Return a callable for the list notebook runtime + templates method over gRPC. + + Lists NotebookRuntimeTemplates in a Location. + + Returns: + Callable[[~.ListNotebookRuntimeTemplatesRequest], + Awaitable[~.ListNotebookRuntimeTemplatesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_notebook_runtime_templates" not in self._stubs: + self._stubs[ + "list_notebook_runtime_templates" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/ListNotebookRuntimeTemplates", + request_serializer=notebook_service.ListNotebookRuntimeTemplatesRequest.serialize, + response_deserializer=notebook_service.ListNotebookRuntimeTemplatesResponse.deserialize, + ) + return self._stubs["list_notebook_runtime_templates"] + + @property + def delete_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeTemplateRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete notebook runtime + template method over gRPC. + + Deletes a NotebookRuntimeTemplate. + + Returns: + Callable[[~.DeleteNotebookRuntimeTemplateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_notebook_runtime_template" not in self._stubs: + self._stubs[ + "delete_notebook_runtime_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookRuntimeTemplate", + request_serializer=notebook_service.DeleteNotebookRuntimeTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_notebook_runtime_template"] + + @property + def assign_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.AssignNotebookRuntimeRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the assign notebook runtime method over gRPC. + + Assigns a NotebookRuntime to a user for a particular + Notebook file. This method will either returns an + existing assignment or generates a new one. 
+ + Returns: + Callable[[~.AssignNotebookRuntimeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "assign_notebook_runtime" not in self._stubs: + self._stubs["assign_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/AssignNotebookRuntime", + request_serializer=notebook_service.AssignNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["assign_notebook_runtime"] + + @property + def get_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeRequest], + Awaitable[notebook_runtime.NotebookRuntime], + ]: + r"""Return a callable for the get notebook runtime method over gRPC. + + Gets a NotebookRuntime. + + Returns: + Callable[[~.GetNotebookRuntimeRequest], + Awaitable[~.NotebookRuntime]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_notebook_runtime" not in self._stubs: + self._stubs["get_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/GetNotebookRuntime", + request_serializer=notebook_service.GetNotebookRuntimeRequest.serialize, + response_deserializer=notebook_runtime.NotebookRuntime.deserialize, + ) + return self._stubs["get_notebook_runtime"] + + @property + def list_notebook_runtimes( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimesRequest], + Awaitable[notebook_service.ListNotebookRuntimesResponse], + ]: + r"""Return a callable for the list notebook runtimes method over gRPC. + + Lists NotebookRuntimes in a Location. + + Returns: + Callable[[~.ListNotebookRuntimesRequest], + Awaitable[~.ListNotebookRuntimesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_notebook_runtimes" not in self._stubs: + self._stubs["list_notebook_runtimes"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/ListNotebookRuntimes", + request_serializer=notebook_service.ListNotebookRuntimesRequest.serialize, + response_deserializer=notebook_service.ListNotebookRuntimesResponse.deserialize, + ) + return self._stubs["list_notebook_runtimes"] + + @property + def delete_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete notebook runtime method over gRPC. + + Deletes a NotebookRuntime. + + Returns: + Callable[[~.DeleteNotebookRuntimeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_notebook_runtime" not in self._stubs: + self._stubs["delete_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/DeleteNotebookRuntime", + request_serializer=notebook_service.DeleteNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_notebook_runtime"] + + @property + def upgrade_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.UpgradeNotebookRuntimeRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the upgrade notebook runtime method over gRPC. + + Upgrades a NotebookRuntime. + + Returns: + Callable[[~.UpgradeNotebookRuntimeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "upgrade_notebook_runtime" not in self._stubs: + self._stubs["upgrade_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/UpgradeNotebookRuntime", + request_serializer=notebook_service.UpgradeNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["upgrade_notebook_runtime"] + + @property + def start_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.StartNotebookRuntimeRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the start notebook runtime method over gRPC. + + Starts a NotebookRuntime. + + Returns: + Callable[[~.StartNotebookRuntimeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "start_notebook_runtime" not in self._stubs: + self._stubs["start_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.NotebookService/StartNotebookRuntime", + request_serializer=notebook_service.StartNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["start_notebook_runtime"] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        # NOTE: the cache guard must test the same key that is stored below;
+        # checking "delete_operation" here would defeat the stub cache and
+        # re-create the wait_operation stub on every property access.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("NotebookServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py new file mode 100644 index 0000000000..7b4cd78110 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/notebook_service/transports/rest.py @@ -0,0 +1,5873 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +from google.cloud.aiplatform_v1.types import notebook_runtime +from google.cloud.aiplatform_v1.types import notebook_service +from google.longrunning import operations_pb2 # type: ignore + +from .base import ( + NotebookServiceTransport, + DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, +) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class NotebookServiceRestInterceptor: + """Interceptor for NotebookService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. 
+ Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the NotebookServiceRestTransport. + + .. code-block:: python + class MyCustomNotebookServiceInterceptor(NotebookServiceRestInterceptor): + def pre_assign_notebook_runtime(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_assign_notebook_runtime(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_notebook_runtime_template(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_notebook_runtime_template(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_notebook_runtime(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_notebook_runtime(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_notebook_runtime_template(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_notebook_runtime_template(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_notebook_runtime(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_notebook_runtime(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_notebook_runtime_template(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_notebook_runtime_template(self, response): + logging.log(f"Received response: {response}") + return response + + def 
pre_list_notebook_runtimes(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_notebook_runtimes(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_notebook_runtime_templates(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_notebook_runtime_templates(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_start_notebook_runtime(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_start_notebook_runtime(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_upgrade_notebook_runtime(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_upgrade_notebook_runtime(self, response): + logging.log(f"Received response: {response}") + return response + + transport = NotebookServiceRestTransport(interceptor=MyCustomNotebookServiceInterceptor()) + client = NotebookServiceClient(transport=transport) + + + """ + + def pre_assign_notebook_runtime( + self, + request: notebook_service.AssignNotebookRuntimeRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.AssignNotebookRuntimeRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for assign_notebook_runtime + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_assign_notebook_runtime( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for assign_notebook_runtime + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_create_notebook_runtime_template( + self, + request: notebook_service.CreateNotebookRuntimeTemplateRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.CreateNotebookRuntimeTemplateRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for create_notebook_runtime_template + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_create_notebook_runtime_template( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_notebook_runtime_template + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_delete_notebook_runtime( + self, + request: notebook_service.DeleteNotebookRuntimeRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.DeleteNotebookRuntimeRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for delete_notebook_runtime + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_delete_notebook_runtime( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_notebook_runtime + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_delete_notebook_runtime_template( + self, + request: notebook_service.DeleteNotebookRuntimeTemplateRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.DeleteNotebookRuntimeTemplateRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for delete_notebook_runtime_template + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_delete_notebook_runtime_template( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_notebook_runtime_template + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_get_notebook_runtime( + self, + request: notebook_service.GetNotebookRuntimeRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[notebook_service.GetNotebookRuntimeRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_notebook_runtime + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_get_notebook_runtime( + self, response: notebook_runtime.NotebookRuntime + ) -> notebook_runtime.NotebookRuntime: + """Post-rpc interceptor for get_notebook_runtime + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_get_notebook_runtime_template( + self, + request: notebook_service.GetNotebookRuntimeTemplateRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.GetNotebookRuntimeTemplateRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for get_notebook_runtime_template + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_get_notebook_runtime_template( + self, response: notebook_runtime.NotebookRuntimeTemplate + ) -> notebook_runtime.NotebookRuntimeTemplate: + """Post-rpc interceptor for get_notebook_runtime_template + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_list_notebook_runtimes( + self, + request: notebook_service.ListNotebookRuntimesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[notebook_service.ListNotebookRuntimesRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_notebook_runtimes + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_list_notebook_runtimes( + self, response: notebook_service.ListNotebookRuntimesResponse + ) -> notebook_service.ListNotebookRuntimesResponse: + """Post-rpc interceptor for list_notebook_runtimes + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_list_notebook_runtime_templates( + self, + request: notebook_service.ListNotebookRuntimeTemplatesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.ListNotebookRuntimeTemplatesRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for list_notebook_runtime_templates + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_list_notebook_runtime_templates( + self, response: notebook_service.ListNotebookRuntimeTemplatesResponse + ) -> notebook_service.ListNotebookRuntimeTemplatesResponse: + """Post-rpc interceptor for list_notebook_runtime_templates + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_start_notebook_runtime( + self, + request: notebook_service.StartNotebookRuntimeRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[notebook_service.StartNotebookRuntimeRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for start_notebook_runtime + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_start_notebook_runtime( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for start_notebook_runtime + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_upgrade_notebook_runtime( + self, + request: notebook_service.UpgradeNotebookRuntimeRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.UpgradeNotebookRuntimeRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for upgrade_notebook_runtime + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_upgrade_notebook_runtime( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for upgrade_notebook_runtime + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. 
+ """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. 
+ """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.WaitOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class NotebookServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: NotebookServiceRestInterceptor + + +class NotebookServiceRestTransport(NotebookServiceTransport): + """REST backend transport for NotebookService. + + The interface for Vertex Notebook service (a.k.a. Colab on + Workbench). + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via an issue in this + library's source repository. Thank you! 
+ """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[NotebookServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via a GitHub issue in + this library's repository. Thank you! + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or NotebookServiceRestInterceptor() + self._prep_wrapped_messages(client_info) + + @property + def operations_client(self) -> operations_v1.AbstractOperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one. 
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { 
+ "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { 
+ "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { 
+ "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": 
"get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + 
}, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { 
+ "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ], + "google.longrunning.Operations.WaitOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + 
"method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) + + # Return the client from cache. + return self._operations_client + + class _AssignNotebookRuntime(NotebookServiceRestStub): + def __hash__(self): + return hash("AssignNotebookRuntime") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.AssignNotebookRuntimeRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the assign notebook runtime method over HTTP. + + Args: + request (~.notebook_service.AssignNotebookRuntimeRequest): + The request object. 
Request message for + [NotebookService.AssignNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.AssignNotebookRuntime]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{parent=projects/*/locations/*}/notebookRuntimes:assign", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_assign_notebook_runtime( + request, metadata + ) + pb_request = notebook_service.AssignNotebookRuntimeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_assign_notebook_runtime(resp) + return resp + + class _CreateNotebookRuntimeTemplate(NotebookServiceRestStub): + def __hash__(self): + return hash("CreateNotebookRuntimeTemplate") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.CreateNotebookRuntimeTemplateRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the create notebook runtime + template method over HTTP. + + Args: + request (~.notebook_service.CreateNotebookRuntimeTemplateRequest): + The request object. Request message for + [NotebookService.CreateNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.CreateNotebookRuntimeTemplate]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{parent=projects/*/locations/*}/notebookRuntimeTemplates", + "body": "notebook_runtime_template", + }, + ] + request, metadata = self._interceptor.pre_create_notebook_runtime_template( + request, metadata + ) + pb_request = notebook_service.CreateNotebookRuntimeTemplateRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_notebook_runtime_template(resp) + return resp + + class _DeleteNotebookRuntime(NotebookServiceRestStub): + def __hash__(self): + return hash("DeleteNotebookRuntime") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.DeleteNotebookRuntimeRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete notebook runtime method over HTTP. + + Args: + request (~.notebook_service.DeleteNotebookRuntimeRequest): + The request object. Request message for + [NotebookService.DeleteNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntime]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_notebook_runtime( + request, metadata + ) + pb_request = notebook_service.DeleteNotebookRuntimeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_notebook_runtime(resp) + return resp + + class _DeleteNotebookRuntimeTemplate(NotebookServiceRestStub): + def __hash__(self): + return hash("DeleteNotebookRuntimeTemplate") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.DeleteNotebookRuntimeTemplateRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete notebook runtime + template method over HTTP. + + Args: + request (~.notebook_service.DeleteNotebookRuntimeTemplateRequest): + The request object. Request message for + [NotebookService.DeleteNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntimeTemplate]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_notebook_runtime_template( + request, metadata + ) + pb_request = notebook_service.DeleteNotebookRuntimeTemplateRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_notebook_runtime_template(resp) + return resp + + class _GetNotebookRuntime(NotebookServiceRestStub): + def __hash__(self): + return hash("GetNotebookRuntime") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.GetNotebookRuntimeRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_runtime.NotebookRuntime: + r"""Call the get notebook runtime method over HTTP. + + Args: + request (~.notebook_service.GetNotebookRuntimeRequest): + The request object. Request message for + [NotebookService.GetNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.GetNotebookRuntime] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notebook_runtime.NotebookRuntime: + A runtime is a virtual machine + allocated to a particular user for a + particular Notebook file on temporary + basis with lifetime limited to 24 hours. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*}", + }, + ] + request, metadata = self._interceptor.pre_get_notebook_runtime( + request, metadata + ) + pb_request = notebook_service.GetNotebookRuntimeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = notebook_runtime.NotebookRuntime() + pb_resp = notebook_runtime.NotebookRuntime.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_notebook_runtime(resp) + return resp + + class _GetNotebookRuntimeTemplate(NotebookServiceRestStub): + def __hash__(self): + return hash("GetNotebookRuntimeTemplate") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.GetNotebookRuntimeTemplateRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_runtime.NotebookRuntimeTemplate: + r"""Call the get notebook runtime + template method over HTTP. + + Args: + request (~.notebook_service.GetNotebookRuntimeTemplateRequest): + The request object. Request message for + [NotebookService.GetNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.GetNotebookRuntimeTemplate] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notebook_runtime.NotebookRuntimeTemplate: + A template that specifies runtime + configurations such as machine type, + runtime version, network configurations, + etc. Multiple runtimes can be created + from a runtime template. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}", + }, + ] + request, metadata = self._interceptor.pre_get_notebook_runtime_template( + request, metadata + ) + pb_request = notebook_service.GetNotebookRuntimeTemplateRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = notebook_runtime.NotebookRuntimeTemplate() + pb_resp = notebook_runtime.NotebookRuntimeTemplate.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_notebook_runtime_template(resp) + return resp + + class _ListNotebookRuntimes(NotebookServiceRestStub): + def __hash__(self): + return hash("ListNotebookRuntimes") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.ListNotebookRuntimesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_service.ListNotebookRuntimesResponse: + r"""Call the list notebook runtimes method over HTTP. + + Args: + request (~.notebook_service.ListNotebookRuntimesRequest): + The request object. Request message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimes]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notebook_service.ListNotebookRuntimesResponse: + Response message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimes]. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/notebookRuntimes", + }, + ] + request, metadata = self._interceptor.pre_list_notebook_runtimes( + request, metadata + ) + pb_request = notebook_service.ListNotebookRuntimesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = notebook_service.ListNotebookRuntimesResponse() + pb_resp = notebook_service.ListNotebookRuntimesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_notebook_runtimes(resp) + return resp + + class _ListNotebookRuntimeTemplates(NotebookServiceRestStub): + def __hash__(self): + return hash("ListNotebookRuntimeTemplates") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.ListNotebookRuntimeTemplatesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_service.ListNotebookRuntimeTemplatesResponse: + r"""Call the list notebook runtime + templates method over HTTP. + + Args: + request (~.notebook_service.ListNotebookRuntimeTemplatesRequest): + The request object. Request message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimeTemplates]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notebook_service.ListNotebookRuntimeTemplatesResponse: + Response message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimeTemplates]. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/notebookRuntimeTemplates", + }, + ] + request, metadata = self._interceptor.pre_list_notebook_runtime_templates( + request, metadata + ) + pb_request = notebook_service.ListNotebookRuntimeTemplatesRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = notebook_service.ListNotebookRuntimeTemplatesResponse() + pb_resp = notebook_service.ListNotebookRuntimeTemplatesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_notebook_runtime_templates(resp) + return resp + + class _StartNotebookRuntime(NotebookServiceRestStub): + def __hash__(self): + return hash("StartNotebookRuntime") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.StartNotebookRuntimeRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the start notebook runtime method over HTTP. + + Args: + request (~.notebook_service.StartNotebookRuntimeRequest): + The request object. Request message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.StartNotebookRuntime]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*}:start", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_start_notebook_runtime( + request, metadata + ) + pb_request = notebook_service.StartNotebookRuntimeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_start_notebook_runtime(resp) + return resp + + class _UpgradeNotebookRuntime(NotebookServiceRestStub): + def __hash__(self): + return hash("UpgradeNotebookRuntime") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.UpgradeNotebookRuntimeRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the upgrade notebook runtime method over HTTP. + + Args: + request (~.notebook_service.UpgradeNotebookRuntimeRequest): + The request object. Request message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.UpgradeNotebookRuntime]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/notebookRuntimes/*}:upgrade", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_upgrade_notebook_runtime( + request, metadata + ) + pb_request = notebook_service.UpgradeNotebookRuntimeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_upgrade_notebook_runtime(resp) + return resp + + @property + def assign_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.AssignNotebookRuntimeRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._AssignNotebookRuntime(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.CreateNotebookRuntimeTemplateRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateNotebookRuntimeTemplate(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteNotebookRuntime(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeTemplateRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteNotebookRuntimeTemplate(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeRequest], notebook_runtime.NotebookRuntime + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GetNotebookRuntime(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeTemplateRequest], + notebook_runtime.NotebookRuntimeTemplate, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetNotebookRuntimeTemplate(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_notebook_runtimes( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimesRequest], + notebook_service.ListNotebookRuntimesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListNotebookRuntimes(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_notebook_runtime_templates( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimeTemplatesRequest], + notebook_service.ListNotebookRuntimeTemplatesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListNotebookRuntimeTemplates(self._session, self._host, self._interceptor) # type: ignore + + @property + def start_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.StartNotebookRuntimeRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._StartNotebookRuntime(self._session, self._host, self._interceptor) # type: ignore + + @property + def upgrade_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.UpgradeNotebookRuntimeRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpgradeNotebookRuntime(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation(NotebookServiceRestStub): + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_location(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.Location() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_location(resp) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations(NotebookServiceRestStub): + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + + r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*}/locations", + }, + ] + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_locations(resp) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy(NotebookServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the get iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + ] + + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = 
path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy(NotebookServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the set iam policy method over HTTP. + + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + ] + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = 
"application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions(NotebookServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + ] + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + 
headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(NotebookServiceRestStub): + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation(NotebookServiceRestStub): + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", 
+ }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { 
+ "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(NotebookServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, 
+ { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + 
"method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = 
"application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(NotebookServiceRestStub): + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation(NotebookServiceRestStub): + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the wait operation method over HTTP. + + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + 
"method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = 
transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_wait_operation(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("NotebookServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/__init__.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/__init__.py new file mode 100644 index 0000000000..2c9d163d97 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import PersistentResourceServiceClient +from .async_client import PersistentResourceServiceAsyncClient + +__all__ = ( + "PersistentResourceServiceClient", + "PersistentResourceServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py new file mode 100644 index 0000000000..9d279e3317 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/async_client.py @@ -0,0 +1,1798 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.persistent_resource_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import persistent_resource +from google.cloud.aiplatform_v1.types import ( + persistent_resource as gca_persistent_resource, +) +from google.cloud.aiplatform_v1.types import persistent_resource_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import PersistentResourceServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import 
PersistentResourceServiceGrpcAsyncIOTransport +from .client import PersistentResourceServiceClient + + +class PersistentResourceServiceAsyncClient: + """A service for managing Vertex AI's machine learning + PersistentResource. + """ + + _client: PersistentResourceServiceClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = PersistentResourceServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = PersistentResourceServiceClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = ( + PersistentResourceServiceClient._DEFAULT_ENDPOINT_TEMPLATE + ) + _DEFAULT_UNIVERSE = PersistentResourceServiceClient._DEFAULT_UNIVERSE + + network_path = staticmethod(PersistentResourceServiceClient.network_path) + parse_network_path = staticmethod( + PersistentResourceServiceClient.parse_network_path + ) + persistent_resource_path = staticmethod( + PersistentResourceServiceClient.persistent_resource_path + ) + parse_persistent_resource_path = staticmethod( + PersistentResourceServiceClient.parse_persistent_resource_path + ) + common_billing_account_path = staticmethod( + PersistentResourceServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + PersistentResourceServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod( + PersistentResourceServiceClient.common_folder_path + ) + parse_common_folder_path = staticmethod( + PersistentResourceServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + PersistentResourceServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + PersistentResourceServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod( + PersistentResourceServiceClient.common_project_path + ) + parse_common_project_path = staticmethod( + PersistentResourceServiceClient.parse_common_project_path + 
) + common_location_path = staticmethod( + PersistentResourceServiceClient.common_location_path + ) + parse_common_location_path = staticmethod( + PersistentResourceServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PersistentResourceServiceAsyncClient: The constructed client. + """ + return PersistentResourceServiceClient.from_service_account_info.__func__(PersistentResourceServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PersistentResourceServiceAsyncClient: The constructed client. + """ + return PersistentResourceServiceClient.from_service_account_file.__func__(PersistentResourceServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. 
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return PersistentResourceServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> PersistentResourceServiceTransport: + """Returns the transport used by the client instance. + + Returns: + PersistentResourceServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. 
+ """ + return self._client._universe_domain + + get_transport_class = functools.partial( + type(PersistentResourceServiceClient).get_transport_class, + type(PersistentResourceServiceClient), + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, PersistentResourceServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the persistent resource service async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.PersistentResourceServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. 
If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = PersistentResourceServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_persistent_resource( + self, + request: Optional[ + Union[persistent_resource_service.CreatePersistentResourceRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + persistent_resource: Optional[ + gca_persistent_resource.PersistentResource + ] = None, + persistent_resource_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a PersistentResource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_create_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreatePersistentResourceRequest( + parent="parent_value", + persistent_resource_id="persistent_resource_id_value", + ) + + # Make the request + operation = client.create_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.CreatePersistentResourceRequest, dict]]): + The request object. Request message for + [PersistentResourceService.CreatePersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.CreatePersistentResource]. + parent (:class:`str`): + Required. The resource name of the Location to create + the PersistentResource in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + persistent_resource (:class:`google.cloud.aiplatform_v1.types.PersistentResource`): + Required. The PersistentResource to + create. + + This corresponds to the ``persistent_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + persistent_resource_id (:class:`str`): + Required. The ID to use for the PersistentResource, + which become the final component of the + PersistentResource's resource name. + + The maximum length is 63 characters, and valid + characters are ``/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/``. 
+ + This corresponds to the ``persistent_resource_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.PersistentResource` Represents long-lasting resources that are dedicated to users to runs custom + workloads. A PersistentResource can have multiple + node pools and each node pool can have its own + machine spec. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, persistent_resource, persistent_resource_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = persistent_resource_service.CreatePersistentResourceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if persistent_resource is not None: + request.persistent_resource = persistent_resource + if persistent_resource_id is not None: + request.persistent_resource_id = persistent_resource_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_persistent_resource, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_persistent_resource.PersistentResource, + metadata_type=persistent_resource_service.CreatePersistentResourceOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_persistent_resource( + self, + request: Optional[ + Union[persistent_resource_service.GetPersistentResourceRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> persistent_resource.PersistentResource: + r"""Gets a PersistentResource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_get_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetPersistentResourceRequest( + name="name_value", + ) + + # Make the request + response = await client.get_persistent_resource(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.GetPersistentResourceRequest, dict]]): + The request object. Request message for + [PersistentResourceService.GetPersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.GetPersistentResource]. + name (:class:`str`): + Required. The name of the PersistentResource resource. + Format: + ``projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PersistentResource: + Represents long-lasting resources + that are dedicated to users to runs + custom workloads. A PersistentResource + can have multiple node pools and each + node pool can have its own machine spec. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = persistent_resource_service.GetPersistentResourceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_persistent_resource, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_persistent_resources( + self, + request: Optional[ + Union[persistent_resource_service.ListPersistentResourcesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPersistentResourcesAsyncPager: + r"""Lists PersistentResources in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_list_persistent_resources(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListPersistentResourcesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_persistent_resources(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.ListPersistentResourcesRequest, dict]]): + The request object. Request message for + [PersistentResourceService.ListPersistentResource][]. + parent (:class:`str`): + Required. The resource name of the Location to list the + PersistentResources from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.persistent_resource_service.pagers.ListPersistentResourcesAsyncPager: + Response message for + [PersistentResourceService.ListPersistentResources][google.cloud.aiplatform.v1.PersistentResourceService.ListPersistentResources] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = persistent_resource_service.ListPersistentResourcesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_persistent_resources, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListPersistentResourcesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_persistent_resource( + self, + request: Optional[ + Union[persistent_resource_service.DeletePersistentResourceRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a PersistentResource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_delete_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeletePersistentResourceRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.DeletePersistentResourceRequest, dict]]): + The request object. Request message for + [PersistentResourceService.DeletePersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.DeletePersistentResource]. + name (:class:`str`): + Required. The name of the PersistentResource to be + deleted. Format: + ``projects/{project}/locations/{location}/persistentResources/{persistent_resource}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = persistent_resource_service.DeletePersistentResourceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_persistent_resource, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def update_persistent_resource( + self, + request: Optional[ + Union[persistent_resource_service.UpdatePersistentResourceRequest, dict] + ] = None, + *, + persistent_resource: Optional[ + gca_persistent_resource.PersistentResource + ] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a PersistentResource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_update_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdatePersistentResourceRequest( + ) + + # Make the request + operation = client.update_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.UpdatePersistentResourceRequest, dict]]): + The request object. Request message for + UpdatePersistentResource method. + persistent_resource (:class:`google.cloud.aiplatform_v1.types.PersistentResource`): + Required. The PersistentResource to update. + + The PersistentResource's ``name`` field is used to + identify the PersistentResource to update. 
Format: + ``projects/{project}/locations/{location}/persistentResources/{persistent_resource}`` + + This corresponds to the ``persistent_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Specify the fields to be + overwritten in the PersistentResource by + the update method. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.PersistentResource` Represents long-lasting resources that are dedicated to users to run custom + workloads. A PersistentResource can have multiple + node pools and each node pool can have its own + machine spec. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([persistent_resource, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = persistent_resource_service.UpdatePersistentResourceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if persistent_resource is not None: + request.persistent_resource = persistent_resource + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_persistent_resource, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("persistent_resource.name", request.persistent_resource.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_persistent_resource.PersistentResource, + metadata_type=persistent_resource_service.UpdatePersistentResourceOperationMetadata, + ) + + # Done; return the response. + return response + + async def reboot_persistent_resource( + self, + request: Optional[ + Union[persistent_resource_service.RebootPersistentResourceRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Reboots a PersistentResource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + async def sample_reboot_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.RebootPersistentResourceRequest( + name="name_value", + ) + + # Make the request + operation = client.reboot_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1.types.RebootPersistentResourceRequest, dict]]): + The request object. Request message for + [PersistentResourceService.RebootPersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.RebootPersistentResource]. + name (:class:`str`): + Required. The name of the PersistentResource resource. + Format: + ``projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.PersistentResource` Represents long-lasting resources that are dedicated to users to run custom + workloads. A PersistentResource can have multiple + node pools and each node pool can have its own + machine spec. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = persistent_resource_service.RebootPersistentResourceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.reboot_persistent_resource, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + persistent_resource.PersistentResource, + metadata_type=persistent_resource_service.RebootPersistentResourceOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. 
+ + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. 
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. 
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "PersistentResourceServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("PersistentResourceServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py new file mode 100644 index 0000000000..682cd973f6 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/client.py @@ -0,0 +1,2256 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1.services.persistent_resource_service import pagers +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import persistent_resource +from google.cloud.aiplatform_v1.types import ( + persistent_resource as gca_persistent_resource, +) +from google.cloud.aiplatform_v1.types import persistent_resource_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import 
timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import PersistentResourceServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import PersistentResourceServiceGrpcTransport +from .transports.grpc_asyncio import PersistentResourceServiceGrpcAsyncIOTransport +from .transports.rest import PersistentResourceServiceRestTransport + + +class PersistentResourceServiceClientMeta(type): + """Metaclass for the PersistentResourceService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[PersistentResourceServiceTransport]] + _transport_registry["grpc"] = PersistentResourceServiceGrpcTransport + _transport_registry["grpc_asyncio"] = PersistentResourceServiceGrpcAsyncIOTransport + _transport_registry["rest"] = PersistentResourceServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[PersistentResourceServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class PersistentResourceServiceClient(metaclass=PersistentResourceServiceClientMeta): + """A service for managing Vertex AI's machine learning + PersistentResource. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. 
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}"
+    _DEFAULT_UNIVERSE = "googleapis.com"
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            PersistentResourceServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            PersistentResourceServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> PersistentResourceServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            PersistentResourceServiceTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def network_path(
+        project: str,
+        network: str,
+    ) -> str:
+        """Returns a fully-qualified network string."""
+        return "projects/{project}/global/networks/{network}".format(
+            project=project,
+            network=network,
+        )
+
+    @staticmethod
+    def parse_network_path(path: str) -> Dict[str, str]:
+        """Parses a network path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def persistent_resource_path(
+        project: str,
+        location: str,
+        persistent_resource: str,
+    ) -> str:
+        """Returns a fully-qualified persistent_resource string."""
+        return "projects/{project}/locations/{location}/persistentResources/{persistent_resource}".format(
+            project=project,
+            location=location,
+            persistent_resource=persistent_resource,
+        )
+
+    @staticmethod
+    def parse_persistent_resource_path(path: str) -> Dict[str, str]:
+        """Parses a persistent_resource path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/persistentResources/(?P<persistent_resource>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def 
get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Deprecated. Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. 
Use the api_endpoint property instead.", + DeprecationWarning, + ) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. 
+ """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". 
+ + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = PersistentResourceServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = PersistentResourceServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = ( + PersistentResourceServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = PersistentResourceServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. 
+ + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = PersistentResourceServiceClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or PersistentResourceServiceClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, PersistentResourceServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the persistent resource service client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, PersistentResourceServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. 
+ + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = PersistentResourceServiceClient._read_environment_variables() + self._client_cert_source = ( + PersistentResourceServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + ) + self._universe_domain = PersistentResourceServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, PersistentResourceServiceTransport) + if transport_provided: + # transport is a PersistentResourceServiceTransport instance. 
+ if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = cast(PersistentResourceServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or PersistentResourceServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(cast(str, transport)) + self._transport = Transport( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + def create_persistent_resource( + self, + request: Optional[ + Union[persistent_resource_service.CreatePersistentResourceRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + persistent_resource: Optional[ + gca_persistent_resource.PersistentResource + ] = None, + persistent_resource_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a PersistentResource. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_create_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreatePersistentResourceRequest( + parent="parent_value", + persistent_resource_id="persistent_resource_id_value", + ) + + # Make the request + operation = client.create_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.CreatePersistentResourceRequest, dict]): + The request object. Request message for + [PersistentResourceService.CreatePersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.CreatePersistentResource]. + parent (str): + Required. The resource name of the Location to create + the PersistentResource in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + persistent_resource (google.cloud.aiplatform_v1.types.PersistentResource): + Required. The PersistentResource to + create. + + This corresponds to the ``persistent_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + persistent_resource_id (str): + Required. The ID to use for the PersistentResource, + which become the final component of the + PersistentResource's resource name. 
+ + The maximum length is 63 characters, and valid + characters are ``/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/``. + + This corresponds to the ``persistent_resource_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.PersistentResource` Represents long-lasting resources that are dedicated to users to runs custom + workloads. A PersistentResource can have multiple + node pools and each node pool can have its own + machine spec. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, persistent_resource, persistent_resource_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a persistent_resource_service.CreatePersistentResourceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, persistent_resource_service.CreatePersistentResourceRequest + ): + request = persistent_resource_service.CreatePersistentResourceRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if persistent_resource is not None: + request.persistent_resource = persistent_resource + if persistent_resource_id is not None: + request.persistent_resource_id = persistent_resource_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_persistent_resource + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_persistent_resource.PersistentResource, + metadata_type=persistent_resource_service.CreatePersistentResourceOperationMetadata, + ) + + # Done; return the response. + return response + + def get_persistent_resource( + self, + request: Optional[ + Union[persistent_resource_service.GetPersistentResourceRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> persistent_resource.PersistentResource: + r"""Gets a PersistentResource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_get_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetPersistentResourceRequest( + name="name_value", + ) + + # Make the request + response = client.get_persistent_resource(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.GetPersistentResourceRequest, dict]): + The request object. Request message for + [PersistentResourceService.GetPersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.GetPersistentResource]. + name (str): + Required. The name of the PersistentResource resource. + Format: + ``projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.types.PersistentResource: + Represents long-lasting resources + that are dedicated to users to runs + custom workloads. A PersistentResource + can have multiple node pools and each + node pool can have its own machine spec. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a persistent_resource_service.GetPersistentResourceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, persistent_resource_service.GetPersistentResourceRequest + ): + request = persistent_resource_service.GetPersistentResourceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_persistent_resource] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_persistent_resources( + self, + request: Optional[ + Union[persistent_resource_service.ListPersistentResourcesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPersistentResourcesPager: + r"""Lists PersistentResources in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_list_persistent_resources(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListPersistentResourcesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_persistent_resources(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.ListPersistentResourcesRequest, dict]): + The request object. Request message for + [PersistentResourceService.ListPersistentResource][]. + parent (str): + Required. The resource name of the Location to list the + PersistentResources from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1.services.persistent_resource_service.pagers.ListPersistentResourcesPager: + Response message for + [PersistentResourceService.ListPersistentResources][google.cloud.aiplatform.v1.PersistentResourceService.ListPersistentResources] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a persistent_resource_service.ListPersistentResourcesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, persistent_resource_service.ListPersistentResourcesRequest + ): + request = persistent_resource_service.ListPersistentResourcesRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_persistent_resources + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPersistentResourcesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_persistent_resource( + self, + request: Optional[ + Union[persistent_resource_service.DeletePersistentResourceRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a PersistentResource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_delete_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeletePersistentResourceRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.DeletePersistentResourceRequest, dict]): + The request object. Request message for + [PersistentResourceService.DeletePersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.DeletePersistentResource]. + name (str): + Required. The name of the PersistentResource to be + deleted. Format: + ``projects/{project}/locations/{location}/persistentResources/{persistent_resource}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a persistent_resource_service.DeletePersistentResourceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, persistent_resource_service.DeletePersistentResourceRequest + ): + request = persistent_resource_service.DeletePersistentResourceRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_persistent_resource + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def update_persistent_resource( + self, + request: Optional[ + Union[persistent_resource_service.UpdatePersistentResourceRequest, dict] + ] = None, + *, + persistent_resource: Optional[ + gca_persistent_resource.PersistentResource + ] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Updates a PersistentResource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_update_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdatePersistentResourceRequest( + ) + + # Make the request + operation = client.update_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.UpdatePersistentResourceRequest, dict]): + The request object. Request message for + UpdatePersistentResource method. + persistent_resource (google.cloud.aiplatform_v1.types.PersistentResource): + Required. The PersistentResource to update. + + The PersistentResource's ``name`` field is used to + identify the PersistentResource to update. Format: + ``projects/{project}/locations/{location}/persistentResources/{persistent_resource}`` + + This corresponds to the ``persistent_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Specify the fields to be + overwritten in the PersistentResource by + the update method. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.PersistentResource` Represents long-lasting resources that are dedicated to users to runs custom + workloads. A PersistentResource can have multiple + node pools and each node pool can have its own + machine spec. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([persistent_resource, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a persistent_resource_service.UpdatePersistentResourceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, persistent_resource_service.UpdatePersistentResourceRequest + ): + request = persistent_resource_service.UpdatePersistentResourceRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if persistent_resource is not None: + request.persistent_resource = persistent_resource + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.update_persistent_resource + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("persistent_resource.name", request.persistent_resource.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_persistent_resource.PersistentResource, + metadata_type=persistent_resource_service.UpdatePersistentResourceOperationMetadata, + ) + + # Done; return the response. + return response + + def reboot_persistent_resource( + self, + request: Optional[ + Union[persistent_resource_service.RebootPersistentResourceRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Reboots a PersistentResource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1 + + def sample_reboot_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.RebootPersistentResourceRequest( + name="name_value", + ) + + # Make the request + operation = client.reboot_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1.types.RebootPersistentResourceRequest, dict]): + The request object. 
Request message for + [PersistentResourceService.RebootPersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.RebootPersistentResource]. + name (str): + Required. The name of the PersistentResource resource. + Format: + ``projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1.types.PersistentResource` Represents long-lasting resources that are dedicated to users to runs custom + workloads. A PersistentResource can have multiple + node pools and each node pool can have its own + machine spec. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a persistent_resource_service.RebootPersistentResourceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance( + request, persistent_resource_service.RebootPersistentResourceRequest + ): + request = persistent_resource_service.RebootPersistentResourceRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.reboot_persistent_resource + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + persistent_resource.PersistentResource, + metadata_type=persistent_resource_service.RebootPersistentResourceOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "PersistentResourceServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. 
+ + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. 
+ """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("PersistentResourceServiceClient",) diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/pagers.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/pagers.py new file mode 100644 index 0000000000..fbcfeaee1f --- /dev/null +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/pagers.py @@ -0,0 +1,168 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, +) + +from google.cloud.aiplatform_v1.types import persistent_resource +from google.cloud.aiplatform_v1.types import persistent_resource_service + + +class ListPersistentResourcesPager: + """A pager for iterating through ``list_persistent_resources`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListPersistentResourcesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``persistent_resources`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListPersistentResources`` requests and continue to iterate + through the ``persistent_resources`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1.types.ListPersistentResourcesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., persistent_resource_service.ListPersistentResourcesResponse + ], + request: persistent_resource_service.ListPersistentResourcesRequest, + response: persistent_resource_service.ListPersistentResourcesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListPersistentResourcesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListPersistentResourcesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = persistent_resource_service.ListPersistentResourcesRequest( + request + ) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages( + self, + ) -> Iterator[persistent_resource_service.ListPersistentResourcesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[persistent_resource.PersistentResource]: + for page in self.pages: + yield from page.persistent_resources + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListPersistentResourcesAsyncPager: + """A pager for iterating through ``list_persistent_resources`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1.types.ListPersistentResourcesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``persistent_resources`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListPersistentResources`` requests and continue to iterate + through the ``persistent_resources`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1.types.ListPersistentResourcesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[persistent_resource_service.ListPersistentResourcesResponse] + ], + request: persistent_resource_service.ListPersistentResourcesRequest, + response: persistent_resource_service.ListPersistentResourcesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1.types.ListPersistentResourcesRequest): + The initial request object. + response (google.cloud.aiplatform_v1.types.ListPersistentResourcesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = persistent_resource_service.ListPersistentResourcesRequest( + request + ) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[persistent_resource_service.ListPersistentResourcesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[persistent_resource.PersistentResource]: + async def async_generator(): + async for page in self.pages: + for response in page.persistent_resources: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/__init__.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/__init__.py new file mode 100644 index 0000000000..d1092ad95a --- /dev/null +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/__init__.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PersistentResourceServiceTransport +from .grpc import PersistentResourceServiceGrpcTransport +from .grpc_asyncio import PersistentResourceServiceGrpcAsyncIOTransport +from .rest import PersistentResourceServiceRestTransport +from .rest import PersistentResourceServiceRestInterceptor + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[PersistentResourceServiceTransport]] +_transport_registry["grpc"] = PersistentResourceServiceGrpcTransport +_transport_registry["grpc_asyncio"] = PersistentResourceServiceGrpcAsyncIOTransport +_transport_registry["rest"] = PersistentResourceServiceRestTransport + +__all__ = ( + "PersistentResourceServiceTransport", + "PersistentResourceServiceGrpcTransport", + "PersistentResourceServiceGrpcAsyncIOTransport", + "PersistentResourceServiceRestTransport", + "PersistentResourceServiceRestInterceptor", +) diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py new file mode 100644 index 0000000000..9136c9fbf4 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/base.py @@ -0,0 +1,340 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1.types import persistent_resource +from google.cloud.aiplatform_v1.types import persistent_resource_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class PersistentResourceServiceTransport(abc.ABC): + """Abstract transport class for PersistentResourceService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_persistent_resource: gapic_v1.method.wrap_method( + self.create_persistent_resource, + default_timeout=None, + client_info=client_info, + ), + self.get_persistent_resource: gapic_v1.method.wrap_method( + self.get_persistent_resource, + default_timeout=None, + client_info=client_info, + ), + self.list_persistent_resources: gapic_v1.method.wrap_method( + self.list_persistent_resources, + default_timeout=None, + client_info=client_info, + ), + self.delete_persistent_resource: gapic_v1.method.wrap_method( + self.delete_persistent_resource, + default_timeout=None, + client_info=client_info, + ), + self.update_persistent_resource: gapic_v1.method.wrap_method( + self.update_persistent_resource, + default_timeout=None, + client_info=client_info, + ), + self.reboot_persistent_resource: gapic_v1.method.wrap_method( + self.reboot_persistent_resource, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.CreatePersistentResourceRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.GetPersistentResourceRequest], + Union[ + persistent_resource.PersistentResource, + Awaitable[persistent_resource.PersistentResource], + ], + ]: + raise NotImplementedError() + + @property + def list_persistent_resources( + self, + ) -> Callable[ + [persistent_resource_service.ListPersistentResourcesRequest], + Union[ + persistent_resource_service.ListPersistentResourcesResponse, + Awaitable[persistent_resource_service.ListPersistentResourcesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.DeletePersistentResourceRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def update_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.UpdatePersistentResourceRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def reboot_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.RebootPersistentResourceRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise 
NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("PersistentResourceServiceTransport",) diff --git 
a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc.py new file mode 100644 index 0000000000..ca685bcfba --- /dev/null +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc.py @@ -0,0 +1,639 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1.types import persistent_resource +from google.cloud.aiplatform_v1.types import persistent_resource_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import PersistentResourceServiceTransport, DEFAULT_CLIENT_INFO + + +class PersistentResourceServiceGrpcTransport(PersistentResourceServiceTransport): + """gRPC backend transport for PersistentResourceService. 
+ + A service for managing Vertex AI's machine learning + PersistentResource. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. 
+ credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.CreatePersistentResourceRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the create persistent resource method over gRPC. + + Creates a PersistentResource. + + Returns: + Callable[[~.CreatePersistentResourceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_persistent_resource" not in self._stubs: + self._stubs["create_persistent_resource"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/CreatePersistentResource", + request_serializer=persistent_resource_service.CreatePersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_persistent_resource"] + + @property + def get_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.GetPersistentResourceRequest], + persistent_resource.PersistentResource, + ]: + r"""Return a callable for the get persistent resource method over gRPC. + + Gets a PersistentResource. + + Returns: + Callable[[~.GetPersistentResourceRequest], + ~.PersistentResource]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_persistent_resource" not in self._stubs: + self._stubs["get_persistent_resource"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/GetPersistentResource", + request_serializer=persistent_resource_service.GetPersistentResourceRequest.serialize, + response_deserializer=persistent_resource.PersistentResource.deserialize, + ) + return self._stubs["get_persistent_resource"] + + @property + def list_persistent_resources( + self, + ) -> Callable[ + [persistent_resource_service.ListPersistentResourcesRequest], + persistent_resource_service.ListPersistentResourcesResponse, + ]: + r"""Return a callable for the list persistent resources method over gRPC. + + Lists PersistentResources in a Location. + + Returns: + Callable[[~.ListPersistentResourcesRequest], + ~.ListPersistentResourcesResponse]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_persistent_resources" not in self._stubs: + self._stubs["list_persistent_resources"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/ListPersistentResources", + request_serializer=persistent_resource_service.ListPersistentResourcesRequest.serialize, + response_deserializer=persistent_resource_service.ListPersistentResourcesResponse.deserialize, + ) + return self._stubs["list_persistent_resources"] + + @property + def delete_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.DeletePersistentResourceRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the delete persistent resource method over gRPC. + + Deletes a PersistentResource. + + Returns: + Callable[[~.DeletePersistentResourceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_persistent_resource" not in self._stubs: + self._stubs["delete_persistent_resource"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/DeletePersistentResource", + request_serializer=persistent_resource_service.DeletePersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_persistent_resource"] + + @property + def update_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.UpdatePersistentResourceRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the update persistent resource method over gRPC. + + Updates a PersistentResource. 
+ + Returns: + Callable[[~.UpdatePersistentResourceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_persistent_resource" not in self._stubs: + self._stubs["update_persistent_resource"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/UpdatePersistentResource", + request_serializer=persistent_resource_service.UpdatePersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_persistent_resource"] + + @property + def reboot_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.RebootPersistentResourceRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the reboot persistent resource method over gRPC. + + Reboots a PersistentResource. + + Returns: + Callable[[~.RebootPersistentResourceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "reboot_persistent_resource" not in self._stubs: + self._stubs["reboot_persistent_resource"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/RebootPersistentResource", + request_serializer=persistent_resource_service.RebootPersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["reboot_persistent_resource"] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "wait_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the get location method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("PersistentResourceServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..bd6b37ef34 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/grpc_asyncio.py @@ -0,0 +1,640 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1.types import persistent_resource +from google.cloud.aiplatform_v1.types import persistent_resource_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import PersistentResourceServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import PersistentResourceServiceGrpcTransport + + +class PersistentResourceServiceGrpcAsyncIOTransport(PersistentResourceServiceTransport): + """gRPC AsyncIO backend transport for PersistentResourceService. + + A service for managing Vertex AI's machine learning + PersistentResource. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. 
+ """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.CreatePersistentResourceRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create persistent resource method over gRPC. + + Creates a PersistentResource. + + Returns: + Callable[[~.CreatePersistentResourceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_persistent_resource" not in self._stubs: + self._stubs["create_persistent_resource"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/CreatePersistentResource", + request_serializer=persistent_resource_service.CreatePersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_persistent_resource"] + + @property + def get_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.GetPersistentResourceRequest], + Awaitable[persistent_resource.PersistentResource], + ]: + r"""Return a callable for the get persistent resource method over gRPC. + + Gets a PersistentResource. + + Returns: + Callable[[~.GetPersistentResourceRequest], + Awaitable[~.PersistentResource]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_persistent_resource" not in self._stubs: + self._stubs["get_persistent_resource"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/GetPersistentResource", + request_serializer=persistent_resource_service.GetPersistentResourceRequest.serialize, + response_deserializer=persistent_resource.PersistentResource.deserialize, + ) + return self._stubs["get_persistent_resource"] + + @property + def list_persistent_resources( + self, + ) -> Callable[ + [persistent_resource_service.ListPersistentResourcesRequest], + Awaitable[persistent_resource_service.ListPersistentResourcesResponse], + ]: + r"""Return a callable for the list persistent resources method over gRPC. + + Lists PersistentResources in a Location. + + Returns: + Callable[[~.ListPersistentResourcesRequest], + Awaitable[~.ListPersistentResourcesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_persistent_resources" not in self._stubs: + self._stubs["list_persistent_resources"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/ListPersistentResources", + request_serializer=persistent_resource_service.ListPersistentResourcesRequest.serialize, + response_deserializer=persistent_resource_service.ListPersistentResourcesResponse.deserialize, + ) + return self._stubs["list_persistent_resources"] + + @property + def delete_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.DeletePersistentResourceRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete persistent resource method over gRPC. + + Deletes a PersistentResource. 
+ + Returns: + Callable[[~.DeletePersistentResourceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_persistent_resource" not in self._stubs: + self._stubs["delete_persistent_resource"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/DeletePersistentResource", + request_serializer=persistent_resource_service.DeletePersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_persistent_resource"] + + @property + def update_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.UpdatePersistentResourceRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the update persistent resource method over gRPC. + + Updates a PersistentResource. + + Returns: + Callable[[~.UpdatePersistentResourceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "update_persistent_resource" not in self._stubs: + self._stubs["update_persistent_resource"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/UpdatePersistentResource", + request_serializer=persistent_resource_service.UpdatePersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["update_persistent_resource"] + + @property + def reboot_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.RebootPersistentResourceRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the reboot persistent resource method over gRPC. + + Reboots a PersistentResource. + + Returns: + Callable[[~.RebootPersistentResourceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "reboot_persistent_resource" not in self._stubs: + self._stubs["reboot_persistent_resource"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1.PersistentResourceService/RebootPersistentResource", + request_serializer=persistent_resource_service.RebootPersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["reboot_persistent_resource"] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("PersistentResourceServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py new file mode 100644 index 0000000000..d929a8f7d8 --- /dev/null +++ b/google/cloud/aiplatform_v1/services/persistent_resource_service/transports/rest.py @@ -0,0 +1,5367 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +from google.cloud.aiplatform_v1.types import persistent_resource +from google.cloud.aiplatform_v1.types import persistent_resource_service +from google.longrunning import operations_pb2 # type: ignore + +from .base import ( + PersistentResourceServiceTransport, + DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, +) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class PersistentResourceServiceRestInterceptor: + """Interceptor for PersistentResourceService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. 
+ Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the PersistentResourceServiceRestTransport. + + .. code-block:: python + class MyCustomPersistentResourceServiceInterceptor(PersistentResourceServiceRestInterceptor): + def pre_create_persistent_resource(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_persistent_resource(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_persistent_resource(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_persistent_resource(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_persistent_resource(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_persistent_resource(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_persistent_resources(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_persistent_resources(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_reboot_persistent_resource(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_reboot_persistent_resource(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_persistent_resource(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_persistent_resource(self, response): + logging.log(f"Received response: {response}") + return response + + 
transport = PersistentResourceServiceRestTransport(interceptor=MyCustomPersistentResourceServiceInterceptor()) + client = PersistentResourceServiceClient(transport=transport) + + + """ + + def pre_create_persistent_resource( + self, + request: persistent_resource_service.CreatePersistentResourceRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + persistent_resource_service.CreatePersistentResourceRequest, + Sequence[Tuple[str, str]], + ]: + """Pre-rpc interceptor for create_persistent_resource + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_create_persistent_resource( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_persistent_resource + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. + """ + return response + + def pre_delete_persistent_resource( + self, + request: persistent_resource_service.DeletePersistentResourceRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + persistent_resource_service.DeletePersistentResourceRequest, + Sequence[Tuple[str, str]], + ]: + """Pre-rpc interceptor for delete_persistent_resource + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_delete_persistent_resource( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_persistent_resource + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. 
+ """ + return response + + def pre_get_persistent_resource( + self, + request: persistent_resource_service.GetPersistentResourceRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + persistent_resource_service.GetPersistentResourceRequest, + Sequence[Tuple[str, str]], + ]: + """Pre-rpc interceptor for get_persistent_resource + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_get_persistent_resource( + self, response: persistent_resource.PersistentResource + ) -> persistent_resource.PersistentResource: + """Post-rpc interceptor for get_persistent_resource + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. + """ + return response + + def pre_list_persistent_resources( + self, + request: persistent_resource_service.ListPersistentResourcesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + persistent_resource_service.ListPersistentResourcesRequest, + Sequence[Tuple[str, str]], + ]: + """Pre-rpc interceptor for list_persistent_resources + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_list_persistent_resources( + self, response: persistent_resource_service.ListPersistentResourcesResponse + ) -> persistent_resource_service.ListPersistentResourcesResponse: + """Post-rpc interceptor for list_persistent_resources + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. 
+ """ + return response + + def pre_reboot_persistent_resource( + self, + request: persistent_resource_service.RebootPersistentResourceRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + persistent_resource_service.RebootPersistentResourceRequest, + Sequence[Tuple[str, str]], + ]: + """Pre-rpc interceptor for reboot_persistent_resource + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_reboot_persistent_resource( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for reboot_persistent_resource + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. + """ + return response + + def pre_update_persistent_resource( + self, + request: persistent_resource_service.UpdatePersistentResourceRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + persistent_resource_service.UpdatePersistentResourceRequest, + Sequence[Tuple[str, str]], + ]: + """Pre-rpc interceptor for update_persistent_resource + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_update_persistent_resource( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for update_persistent_resource + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. 
+ """ + return response + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. 
+ """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. + """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. 
+ """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. + """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. 
+ """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. + """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. + """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.WaitOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. 
+ """ + return response + + +@dataclasses.dataclass +class PersistentResourceServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: PersistentResourceServiceRestInterceptor + + +class PersistentResourceServiceRestTransport(PersistentResourceServiceTransport): + """REST backend transport for PersistentResourceService. + + A service for managing Vertex AI's machine learning + PersistentResource. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via an issue in this + library's source repository. Thank you! + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[PersistentResourceServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via a GitHub issue in + this library's repository. Thank you! + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or PersistentResourceServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def operations_client(self) -> operations_v1.AbstractOperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Only create a new client if we do not already have one.
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { 
+ "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { 
+ "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { 
+ "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": 
"get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + 
}, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { 
+ "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ], + "google.longrunning.Operations.WaitOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + 
"method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) + + # Return the client from cache. + return self._operations_client + + class _CreatePersistentResource(PersistentResourceServiceRestStub): + def __hash__(self): + return hash("CreatePersistentResource") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "persistentResourceId": "", + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: persistent_resource_service.CreatePersistentResourceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the create persistent + resource method over HTTP. + + Args: + request (~.persistent_resource_service.CreatePersistentResourceRequest): + The request object. 
Request message for + [PersistentResourceService.CreatePersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.CreatePersistentResource]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{parent=projects/*/locations/*}/persistentResources", + "body": "persistent_resource", + }, + ] + request, metadata = self._interceptor.pre_create_persistent_resource( + request, metadata + ) + pb_request = persistent_resource_service.CreatePersistentResourceRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_persistent_resource(resp) + return resp + + class _DeletePersistentResource(PersistentResourceServiceRestStub): + def __hash__(self): + return hash("DeletePersistentResource") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: persistent_resource_service.DeletePersistentResourceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete persistent + resource method over HTTP. + + Args: + request (~.persistent_resource_service.DeletePersistentResourceRequest): + The request object. Request message for + [PersistentResourceService.DeletePersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.DeletePersistentResource]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_persistent_resource( + request, metadata + ) + pb_request = persistent_resource_service.DeletePersistentResourceRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_persistent_resource(resp) + return resp + + class _GetPersistentResource(PersistentResourceServiceRestStub): + def __hash__(self): + return hash("GetPersistentResource") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: persistent_resource_service.GetPersistentResourceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> persistent_resource.PersistentResource: + r"""Call the get persistent resource method over HTTP. + + Args: + request (~.persistent_resource_service.GetPersistentResourceRequest): + The request object. Request message for + [PersistentResourceService.GetPersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.GetPersistentResource]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.persistent_resource.PersistentResource: + Represents long-lasting resources + that are dedicated to users to runs + custom workloads. A PersistentResource + can have multiple node pools and each + node pool can have its own machine spec. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}", + }, + ] + request, metadata = self._interceptor.pre_get_persistent_resource( + request, metadata + ) + pb_request = persistent_resource_service.GetPersistentResourceRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = persistent_resource.PersistentResource() + pb_resp = persistent_resource.PersistentResource.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_persistent_resource(resp) + return resp + + class _ListPersistentResources(PersistentResourceServiceRestStub): + def __hash__(self): + return hash("ListPersistentResources") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: persistent_resource_service.ListPersistentResourcesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> persistent_resource_service.ListPersistentResourcesResponse: + r"""Call the list persistent resources method over HTTP. + + Args: + request (~.persistent_resource_service.ListPersistentResourcesRequest): + The request object. Request message for + [PersistentResourceService.ListPersistentResource][]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.persistent_resource_service.ListPersistentResourcesResponse: + Response message for + [PersistentResourceService.ListPersistentResources][google.cloud.aiplatform.v1.PersistentResourceService.ListPersistentResources] + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1/{parent=projects/*/locations/*}/persistentResources", + }, + ] + request, metadata = self._interceptor.pre_list_persistent_resources( + request, metadata + ) + pb_request = persistent_resource_service.ListPersistentResourcesRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = persistent_resource_service.ListPersistentResourcesResponse() + pb_resp = persistent_resource_service.ListPersistentResourcesResponse.pb( + resp + ) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_persistent_resources(resp) + return resp + + class _RebootPersistentResource(PersistentResourceServiceRestStub): + def __hash__(self): + return hash("RebootPersistentResource") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: persistent_resource_service.RebootPersistentResourceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the reboot persistent + resource method over HTTP. + + Args: + request (~.persistent_resource_service.RebootPersistentResourceRequest): + The request object. Request message for + [PersistentResourceService.RebootPersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.RebootPersistentResource]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}:reboot", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_reboot_persistent_resource( + request, metadata + ) + pb_request = persistent_resource_service.RebootPersistentResourceRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_reboot_persistent_resource(resp) + return resp + + class _UpdatePersistentResource(PersistentResourceServiceRestStub): + def __hash__(self): + return hash("UpdatePersistentResource") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: persistent_resource_service.UpdatePersistentResourceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the update persistent + resource method over HTTP. + + Args: + request (~.persistent_resource_service.UpdatePersistentResourceRequest): + The request object. Request message for + UpdatePersistentResource method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1/{persistent_resource.name=projects/*/locations/*/persistentResources/*}", + "body": "persistent_resource", + }, + ] + request, metadata = self._interceptor.pre_update_persistent_resource( + request, metadata + ) + pb_request = persistent_resource_service.UpdatePersistentResourceRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_persistent_resource(resp) + return resp + + @property + def create_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.CreatePersistentResourceRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CreatePersistentResource(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.DeletePersistentResourceRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeletePersistentResource(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.GetPersistentResourceRequest], + persistent_resource.PersistentResource, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetPersistentResource(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_persistent_resources( + self, + ) -> Callable[ + [persistent_resource_service.ListPersistentResourcesRequest], + persistent_resource_service.ListPersistentResourcesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListPersistentResources(self._session, self._host, self._interceptor) # type: ignore + + @property + def reboot_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.RebootPersistentResourceRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._RebootPersistentResource(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.UpdatePersistentResourceRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdatePersistentResource(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation(PersistentResourceServiceRestStub): + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_location(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.Location() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_location(resp) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations(PersistentResourceServiceRestStub): + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + + r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*}/locations", + }, + ] + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_locations(resp) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy(PersistentResourceServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the get iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + ] + + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = 
path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy(PersistentResourceServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the set iam policy method over HTTP. + + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + ] + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = 
"application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions(PersistentResourceServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + ] + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + 
headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(PersistentResourceServiceRestStub): + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation(PersistentResourceServiceRestStub): + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": 
"delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { 
+ "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(PersistentResourceServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + 
{ + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + 
"method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = 
json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(PersistentResourceServiceRestStub): + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation(PersistentResourceServiceRestStub): + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the wait operation method over HTTP. + + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + 
"method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = 
transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_wait_operation(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("PersistentResourceServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py index a68ce38f92..2ba1e0d406 100644 --- a/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/pipeline_service/transports/rest.py @@ -896,6 +896,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1036,6 +1040,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1190,6 +1198,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1354,6 +1366,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1520,6 +1536,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1688,6 +1708,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1862,6 +1886,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -1964,7 +1992,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2018,6 +2046,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2192,6 +2224,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2344,6 +2380,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -4226,6 +4266,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", 
@@ -4366,6 +4410,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4577,6 +4625,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4741,6 +4793,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4967,6 +5023,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5135,6 +5195,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -5370,6 +5434,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5472,7 +5540,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -5526,6 +5594,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5761,6 +5833,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5913,6 +5989,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py index 732fa729b9..b340cf59f4 100644 --- a/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/prediction_service/transports/rest.py @@ -2478,6 +2478,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2618,6 +2622,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2829,6 +2837,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2993,6 +3005,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -3219,6 +3235,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3387,6 +3407,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -3622,6 +3646,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -3724,7 +3752,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -3778,6 +3806,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4013,6 +4045,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -4165,6 +4201,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/schedule_service/transports/rest.py b/google/cloud/aiplatform_v1/services/schedule_service/transports/rest.py index 42b02d8e8e..70df919f85 100644 --- a/google/cloud/aiplatform_v1/services/schedule_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/schedule_service/transports/rest.py @@ -724,6 +724,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -864,6 +868,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1018,6 +1026,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1182,6 +1194,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1348,6 +1364,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1516,6 +1536,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1690,6 +1714,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -1792,7 +1820,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -1846,6 +1874,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2020,6 +2052,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2172,6 +2208,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3527,6 +3567,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3667,6 +3711,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3878,6 +3926,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4042,6 +4094,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4268,6 +4324,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4436,6 +4496,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4671,6 +4735,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4773,7 +4841,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -4827,6 +4895,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5062,6 +5134,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5214,6 +5290,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest.py b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest.py index 21f6eb67ff..d263306969 100644 --- 
a/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/specialist_pool_service/transports/rest.py @@ -705,6 +705,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -845,6 +849,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -999,6 +1007,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1163,6 +1175,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1329,6 +1345,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": 
"get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1497,6 +1517,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1671,6 +1695,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -1773,7 +1801,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -1827,6 +1855,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2001,6 +2033,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2153,6 +2189,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3356,6 +3396,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3496,6 +3540,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3707,6 +3755,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3871,6 +3923,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4097,6 +4153,10 @@ def __call__( "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4265,6 +4325,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -4500,6 +4564,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4602,7 +4670,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -4656,6 +4724,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4891,6 +4963,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5043,6 +5119,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + 
"uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py index c94cd51781..22e8d4f9d8 100644 --- a/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/tensorboard_service/transports/rest.py @@ -1534,6 +1534,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1674,6 +1678,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1828,6 +1836,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1992,6 +2004,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + 
"uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2158,6 +2174,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2326,6 +2346,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -2500,6 +2524,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2602,7 +2630,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -2656,6 +2684,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": 
"/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2830,6 +2862,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2982,6 +3018,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -6809,6 +6849,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -6949,6 +6993,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -7160,6 +7208,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -7324,6 +7376,10 @@ def __call__( "method": "delete", "uri": 
"/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -7550,6 +7606,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -7718,6 +7778,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -7953,6 +8017,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -8055,7 +8123,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -8109,6 +8177,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -8344,6 +8416,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -8496,6 +8572,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py index e109cd207f..211eee9e96 100644 --- a/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py +++ b/google/cloud/aiplatform_v1/services/vizier_service/transports/rest.py @@ -962,6 +962,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1102,6 +1106,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1256,6 +1264,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1420,6 +1432,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1586,6 +1602,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1754,6 +1774,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -1928,6 +1952,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2030,7 +2058,7 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { 
"method": "get", @@ -2084,6 +2112,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2258,6 +2290,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2410,6 +2446,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -4560,6 +4600,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4700,6 +4744,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4911,6 +4959,10 @@ def __call__( "method": "delete", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5075,6 +5127,10 @@ def __call__( "method": "delete", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "delete", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -5301,6 +5357,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5469,6 +5529,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", }, + { + "method": "get", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", @@ -5704,6 +5768,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5806,7 +5874,7 @@ def __call__( }, { "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + "uri": "/v1/{name=projects/*/locations/*/tuningJobs/*}/operations", }, { "method": "get", @@ -5860,6 +5928,10 @@ def __call__( "method": "get", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": 
"/v1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -6095,6 +6167,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -6247,6 +6323,10 @@ def __call__( "method": "post", "uri": "/v1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/v1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/v1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1/types/__init__.py b/google/cloud/aiplatform_v1/types/__init__.py index 269c468f88..179dddee00 100644 --- a/google/cloud/aiplatform_v1/types/__init__.py +++ b/google/cloud/aiplatform_v1/types/__init__.py @@ -326,6 +326,13 @@ UpdateFeaturestoreOperationMetadata, UpdateFeaturestoreRequest, ) +from .genai_tuning_service import ( + CancelTuningJobRequest, + CreateTuningJobRequest, + GetTuningJobRequest, + ListTuningJobsRequest, + ListTuningJobsResponse, +) from .hyperparameter_tuning_job import ( HyperparameterTuningJob, ) @@ -451,6 +458,7 @@ NfsMount, PersistentDiskSpec, ResourcesConsumed, + ShieldedVmConfig, ) from .manual_batch_tuning_parameters import ( ManualBatchTuningParameters, @@ -606,6 +614,43 @@ NasTrial, NasTrialDetail, ) +from .network_spec import ( + NetworkSpec, +) +from .notebook_euc_config import ( + NotebookEucConfig, +) +from .notebook_idle_shutdown_config import ( + NotebookIdleShutdownConfig, +) +from .notebook_runtime import ( + NotebookRuntime, + NotebookRuntimeTemplate, + NotebookRuntimeType, +) +from .notebook_runtime_template_ref import ( + 
NotebookRuntimeTemplateRef, +) +from .notebook_service import ( + AssignNotebookRuntimeOperationMetadata, + AssignNotebookRuntimeRequest, + CreateNotebookRuntimeTemplateOperationMetadata, + CreateNotebookRuntimeTemplateRequest, + DeleteNotebookRuntimeRequest, + DeleteNotebookRuntimeTemplateRequest, + GetNotebookRuntimeRequest, + GetNotebookRuntimeTemplateRequest, + ListNotebookRuntimesRequest, + ListNotebookRuntimesResponse, + ListNotebookRuntimeTemplatesRequest, + ListNotebookRuntimeTemplatesResponse, + StartNotebookRuntimeOperationMetadata, + StartNotebookRuntimeRequest, + StartNotebookRuntimeResponse, + UpgradeNotebookRuntimeOperationMetadata, + UpgradeNotebookRuntimeRequest, + UpgradeNotebookRuntimeResponse, +) from .openapi import ( Schema, Type, @@ -614,6 +659,26 @@ DeleteOperationMetadata, GenericOperationMetadata, ) +from .persistent_resource import ( + PersistentResource, + RaySpec, + ResourcePool, + ResourceRuntime, + ResourceRuntimeSpec, + ServiceAccountSpec, +) +from .persistent_resource_service import ( + CreatePersistentResourceOperationMetadata, + CreatePersistentResourceRequest, + DeletePersistentResourceRequest, + GetPersistentResourceRequest, + ListPersistentResourcesRequest, + ListPersistentResourcesResponse, + RebootPersistentResourceOperationMetadata, + RebootPersistentResourceRequest, + UpdatePersistentResourceOperationMetadata, + UpdatePersistentResourceRequest, +) from .pipeline_failure_policy import ( PipelineFailurePolicy, ) @@ -800,6 +865,15 @@ TimestampSplit, TrainingPipeline, ) +from .tuning_job import ( + SupervisedHyperParameters, + SupervisedTuningDatasetDistribution, + SupervisedTuningDataStats, + SupervisedTuningSpec, + TunedModel, + TuningDataStats, + TuningJob, +) from .types import ( BoolArray, DoubleArray, @@ -1071,6 +1145,11 @@ "UpdateFeatureRequest", "UpdateFeaturestoreOperationMetadata", "UpdateFeaturestoreRequest", + "CancelTuningJobRequest", + "CreateTuningJobRequest", + "GetTuningJobRequest", + "ListTuningJobsRequest", + 
"ListTuningJobsResponse", "HyperparameterTuningJob", "Index", "IndexDatapoint", @@ -1175,6 +1254,7 @@ "NfsMount", "PersistentDiskSpec", "ResourcesConsumed", + "ShieldedVmConfig", "ManualBatchTuningParameters", "FindNeighborsRequest", "FindNeighborsResponse", @@ -1299,10 +1379,51 @@ "NasJobSpec", "NasTrial", "NasTrialDetail", + "NetworkSpec", + "NotebookEucConfig", + "NotebookIdleShutdownConfig", + "NotebookRuntime", + "NotebookRuntimeTemplate", + "NotebookRuntimeType", + "NotebookRuntimeTemplateRef", + "AssignNotebookRuntimeOperationMetadata", + "AssignNotebookRuntimeRequest", + "CreateNotebookRuntimeTemplateOperationMetadata", + "CreateNotebookRuntimeTemplateRequest", + "DeleteNotebookRuntimeRequest", + "DeleteNotebookRuntimeTemplateRequest", + "GetNotebookRuntimeRequest", + "GetNotebookRuntimeTemplateRequest", + "ListNotebookRuntimesRequest", + "ListNotebookRuntimesResponse", + "ListNotebookRuntimeTemplatesRequest", + "ListNotebookRuntimeTemplatesResponse", + "StartNotebookRuntimeOperationMetadata", + "StartNotebookRuntimeRequest", + "StartNotebookRuntimeResponse", + "UpgradeNotebookRuntimeOperationMetadata", + "UpgradeNotebookRuntimeRequest", + "UpgradeNotebookRuntimeResponse", "Schema", "Type", "DeleteOperationMetadata", "GenericOperationMetadata", + "PersistentResource", + "RaySpec", + "ResourcePool", + "ResourceRuntime", + "ResourceRuntimeSpec", + "ServiceAccountSpec", + "CreatePersistentResourceOperationMetadata", + "CreatePersistentResourceRequest", + "DeletePersistentResourceRequest", + "GetPersistentResourceRequest", + "ListPersistentResourcesRequest", + "ListPersistentResourcesResponse", + "RebootPersistentResourceOperationMetadata", + "RebootPersistentResourceRequest", + "UpdatePersistentResourceOperationMetadata", + "UpdatePersistentResourceRequest", "PipelineFailurePolicy", "PipelineJob", "PipelineJobDetail", @@ -1447,6 +1568,13 @@ "StratifiedSplit", "TimestampSplit", "TrainingPipeline", + "SupervisedHyperParameters", + 
"SupervisedTuningDatasetDistribution", + "SupervisedTuningDataStats", + "SupervisedTuningSpec", + "TunedModel", + "TuningDataStats", + "TuningJob", "BoolArray", "DoubleArray", "Int64Array", diff --git a/google/cloud/aiplatform_v1/types/content.py b/google/cloud/aiplatform_v1/types/content.py index d1a8423ac6..0c4b6b408e 100644 --- a/google/cloud/aiplatform_v1/types/content.py +++ b/google/cloud/aiplatform_v1/types/content.py @@ -192,16 +192,18 @@ class Part(proto.Message): class Blob(proto.Message): - r"""Raw media bytes. + r"""Content blob. - Text should not be sent as raw bytes, use the 'text' field. + It's preferred to send as + [text][google.cloud.aiplatform.v1.Part.text] directly rather than + raw bytes. Attributes: mime_type (str): Required. The IANA standard MIME type of the source data. data (bytes): - Required. Raw bytes for media formats. + Required. Raw bytes. """ mime_type: str = proto.Field( @@ -289,6 +291,23 @@ class GenerationConfig(proto.Message): This field is a member of `oneof`_ ``_max_output_tokens``. stop_sequences (MutableSequence[str]): Optional. Stop sequences. + presence_penalty (float): + Optional. Positive penalties. + + This field is a member of `oneof`_ ``_presence_penalty``. + frequency_penalty (float): + Optional. Frequency penalties. + + This field is a member of `oneof`_ ``_frequency_penalty``. + response_mime_type (str): + Optional. Output response mimetype of the generated + candidate text. Supported mimetype: + + - ``text/plain``: (default) Text output. + - ``application/json``: JSON response in the candidates. + The model needs to be prompted to output the appropriate + response type, otherwise the behavior is undefined. This + is a preview feature. 
""" temperature: float = proto.Field( @@ -320,6 +339,20 @@ class GenerationConfig(proto.Message): proto.STRING, number=6, ) + presence_penalty: float = proto.Field( + proto.FLOAT, + number=8, + optional=True, + ) + frequency_penalty: float = proto.Field( + proto.FLOAT, + number=9, + optional=True, + ) + response_mime_type: str = proto.Field( + proto.STRING, + number=13, + ) class SafetySetting(proto.Message): @@ -330,6 +363,11 @@ class SafetySetting(proto.Message): Required. Harm category. threshold (google.cloud.aiplatform_v1.types.SafetySetting.HarmBlockThreshold): Required. The harm block threshold. + method (google.cloud.aiplatform_v1.types.SafetySetting.HarmBlockMethod): + Optional. Specify if the threshold is used + for probability or severity score. If not + specified, the threshold is used for probability + score. """ class HarmBlockThreshold(proto.Enum): @@ -354,6 +392,23 @@ class HarmBlockThreshold(proto.Enum): BLOCK_ONLY_HIGH = 3 BLOCK_NONE = 4 + class HarmBlockMethod(proto.Enum): + r"""Probability vs severity. + + Values: + HARM_BLOCK_METHOD_UNSPECIFIED (0): + The harm block method is unspecified. + SEVERITY (1): + The harm block method uses both probability + and severity scores. + PROBABILITY (2): + The harm block method uses the probability + score. + """ + HARM_BLOCK_METHOD_UNSPECIFIED = 0 + SEVERITY = 1 + PROBABILITY = 2 + category: "HarmCategory" = proto.Field( proto.ENUM, number=1, @@ -364,6 +419,11 @@ class HarmBlockThreshold(proto.Enum): number=2, enum=HarmBlockThreshold, ) + method: HarmBlockMethod = proto.Field( + proto.ENUM, + number=4, + enum=HarmBlockMethod, + ) class SafetyRating(proto.Message): diff --git a/google/cloud/aiplatform_v1/types/custom_job.py b/google/cloud/aiplatform_v1/types/custom_job.py index 91ce8c66bd..2b0ce14ada 100644 --- a/google/cloud/aiplatform_v1/types/custom_job.py +++ b/google/cloud/aiplatform_v1/types/custom_job.py @@ -172,6 +172,16 @@ class CustomJobSpec(proto.Message): r"""Represents the spec of a CustomJob. 
Attributes: + persistent_resource_id (str): + Optional. The ID of the PersistentResource in + the same Project and Location which to run + + If this is specified, the job will be run on + existing machines held by the PersistentResource + instead of on-demand short-live machines. The + network and CMEK configs on the job should be + consistent with those on the PersistentResource, + otherwise, the job will be rejected. worker_pool_specs (MutableSequence[google.cloud.aiplatform_v1.types.WorkerPoolSpec]): Required. The spec of the worker pools including machine type and Docker image. All @@ -301,6 +311,10 @@ class CustomJobSpec(proto.Message): default version. """ + persistent_resource_id: str = proto.Field( + proto.STRING, + number=14, + ) worker_pool_specs: MutableSequence["WorkerPoolSpec"] = proto.RepeatedField( proto.MESSAGE, number=1, diff --git a/google/cloud/aiplatform_v1/types/endpoint.py b/google/cloud/aiplatform_v1/types/endpoint.py index 407edcd77b..781b6da199 100644 --- a/google/cloud/aiplatform_v1/types/endpoint.py +++ b/google/cloud/aiplatform_v1/types/endpoint.py @@ -274,6 +274,12 @@ class DeployedModel(proto.Message): is not populated, all fields of the [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec] will be used for the explanation configuration. + disable_explanations (bool): + If true, deploy the model without explainable feature, + regardless the existence of + [Model.explanation_spec][google.cloud.aiplatform.v1.Model.explanation_spec] + or + [explanation_spec][google.cloud.aiplatform.v1.DeployedModel.explanation_spec]. service_account (str): The service account that the DeployedModel's container runs as. Specify the email address of the service account. 
If @@ -357,6 +363,10 @@ class DeployedModel(proto.Message): number=9, message=explanation.ExplanationSpec, ) + disable_explanations: bool = proto.Field( + proto.BOOL, + number=19, + ) service_account: str = proto.Field( proto.STRING, number=11, diff --git a/google/cloud/aiplatform_v1/types/genai_tuning_service.py b/google/cloud/aiplatform_v1/types/genai_tuning_service.py new file mode 100644 index 0000000000..e6ba03a970 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/genai_tuning_service.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "CreateTuningJobRequest", + "GetTuningJobRequest", + "ListTuningJobsRequest", + "ListTuningJobsResponse", + "CancelTuningJobRequest", + }, +) + + +class CreateTuningJobRequest(proto.Message): + r"""Request message for + [GenAiTuningService.CreateTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.CreateTuningJob]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + TuningJob in. Format: + ``projects/{project}/locations/{location}`` + tuning_job (google.cloud.aiplatform_v1.types.TuningJob): + Required. The TuningJob to create. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + tuning_job: gca_tuning_job.TuningJob = proto.Field( + proto.MESSAGE, + number=2, + message=gca_tuning_job.TuningJob, + ) + + +class GetTuningJobRequest(proto.Message): + r"""Request message for + [GenAiTuningService.GetTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.GetTuningJob]. + + Attributes: + name (str): + Required. The name of the TuningJob resource. Format: + ``projects/{project}/locations/{location}/tuningJobs/{tuning_job}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListTuningJobsRequest(proto.Message): + r"""Request message for + [GenAiTuningService.ListTuningJobs][google.cloud.aiplatform.v1.GenAiTuningService.ListTuningJobs]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + TuningJobs from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Optional. The standard list filter. + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via [ListTuningJob.next_page_token][] of the previous + GenAiTuningService.ListTuningJob][] call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=2, + ) + page_size: int = proto.Field( + proto.INT32, + number=3, + ) + page_token: str = proto.Field( + proto.STRING, + number=4, + ) + + +class ListTuningJobsResponse(proto.Message): + r"""Response message for + [GenAiTuningService.ListTuningJobs][google.cloud.aiplatform.v1.GenAiTuningService.ListTuningJobs] + + Attributes: + tuning_jobs (MutableSequence[google.cloud.aiplatform_v1.types.TuningJob]): + List of TuningJobs in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListTuningJobsRequest.page_token][google.cloud.aiplatform.v1.ListTuningJobsRequest.page_token] + to obtain that page. 
+ """ + + @property + def raw_page(self): + return self + + tuning_jobs: MutableSequence[gca_tuning_job.TuningJob] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_tuning_job.TuningJob, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class CancelTuningJobRequest(proto.Message): + r"""Request message for + [GenAiTuningService.CancelTuningJob][google.cloud.aiplatform.v1.GenAiTuningService.CancelTuningJob]. + + Attributes: + name (str): + Required. The name of the TuningJob to cancel. Format: + ``projects/{project}/locations/{location}/tuningJobs/{tuning_job}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/machine_resources.py b/google/cloud/aiplatform_v1/types/machine_resources.py index d83e00a7dd..3eb1ba0ad5 100644 --- a/google/cloud/aiplatform_v1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1/types/machine_resources.py @@ -34,6 +34,7 @@ "PersistentDiskSpec", "NfsMount", "AutoscalingMetricSpec", + "ShieldedVmConfig", }, ) @@ -387,4 +388,27 @@ class AutoscalingMetricSpec(proto.Message): ) +class ShieldedVmConfig(proto.Message): + r"""A set of Shielded Instance options. See `Images using supported + Shielded VM + features `__. + + Attributes: + enable_secure_boot (bool): + Defines whether the instance has `Secure + Boot `__ + enabled. + + Secure Boot helps ensure that the system only runs authentic + software by verifying the digital signature of all boot + components, and halting the boot process if signature + verification fails. 
+ """ + + enable_secure_boot: bool = proto.Field( + proto.BOOL, + number=1, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/network_spec.py b/google/cloud/aiplatform_v1/types/network_spec.py new file mode 100644 index 0000000000..0566c60d7f --- /dev/null +++ b/google/cloud/aiplatform_v1/types/network_spec.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "NetworkSpec", + }, +) + + +class NetworkSpec(proto.Message): + r"""Network spec. + + Attributes: + enable_internet_access (bool): + Whether to enable public internet access. + Default false. + network (str): + The full name of the Google Compute Engine + `network `__ + subnetwork (str): + The name of the subnet that this instance is in. 
Format: + ``projects/{project_id_or_number}/regions/{region}/subnetworks/{subnetwork_id}`` + """ + + enable_internet_access: bool = proto.Field( + proto.BOOL, + number=1, + ) + network: str = proto.Field( + proto.STRING, + number=2, + ) + subnetwork: str = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/notebook_euc_config.py b/google/cloud/aiplatform_v1/types/notebook_euc_config.py new file mode 100644 index 0000000000..c5b0dc3078 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/notebook_euc_config.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "NotebookEucConfig", + }, +) + + +class NotebookEucConfig(proto.Message): + r"""The euc configuration of NotebookRuntimeTemplate. + + Attributes: + euc_disabled (bool): + Input only. Whether EUC is disabled in this + NotebookRuntimeTemplate. In proto3, the default + value of a boolean is false. In this way, by + default EUC will be enabled for + NotebookRuntimeTemplate. + bypass_actas_check (bool): + Output only. Whether ActAs check is bypassed + for service account attached to the VM. 
If + false, we need ActAs check for the default + Compute Engine Service account. When a Runtime + is created, a VM is allocated using Default + Compute Engine Service Account. Any user + requesting to use this Runtime requires Service + Account User (ActAs) permission over this SA. If + true, Runtime owner is using EUC and does not + require the above permission as VM no longer use + default Compute Engine SA, but a P4SA. + """ + + euc_disabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + bypass_actas_check: bool = proto.Field( + proto.BOOL, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/notebook_idle_shutdown_config.py b/google/cloud/aiplatform_v1/types/notebook_idle_shutdown_config.py new file mode 100644 index 0000000000..ec6975ba11 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/notebook_idle_shutdown_config.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "NotebookIdleShutdownConfig", + }, +) + + +class NotebookIdleShutdownConfig(proto.Message): + r"""The idle shutdown configuration of NotebookRuntimeTemplate, which + contains the idle_timeout as required field. + + Attributes: + idle_timeout (google.protobuf.duration_pb2.Duration): + Required. Duration is accurate to the second. In Notebook, + Idle Timeout is accurate to minute so the range of + idle_timeout (second) is: 10 \* 60 ~ 1440 + + - + + 60. + idle_shutdown_disabled (bool): + Whether Idle Shutdown is disabled in this + NotebookRuntimeTemplate. + """ + + idle_timeout: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + idle_shutdown_disabled: bool = proto.Field( + proto.BOOL, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/notebook_runtime.py b/google/cloud/aiplatform_v1/types/notebook_runtime.py new file mode 100644 index 0000000000..ecc1b088bb --- /dev/null +++ b/google/cloud/aiplatform_v1/types/notebook_runtime.py @@ -0,0 +1,443 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import network_spec as gca_network_spec +from google.cloud.aiplatform_v1.types import notebook_euc_config +from google.cloud.aiplatform_v1.types import notebook_idle_shutdown_config +from google.cloud.aiplatform_v1.types import ( + notebook_runtime_template_ref as gca_notebook_runtime_template_ref, +) +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "NotebookRuntimeType", + "NotebookRuntimeTemplate", + "NotebookRuntime", + }, +) + + +class NotebookRuntimeType(proto.Enum): + r"""Represents a notebook runtime type. + + Values: + NOTEBOOK_RUNTIME_TYPE_UNSPECIFIED (0): + Unspecified notebook runtime type, NotebookRuntimeType will + default to USER_DEFINED. + USER_DEFINED (1): + runtime or template with coustomized + configurations from user. + ONE_CLICK (2): + runtime or template with system defined + configurations. + """ + NOTEBOOK_RUNTIME_TYPE_UNSPECIFIED = 0 + USER_DEFINED = 1 + ONE_CLICK = 2 + + +class NotebookRuntimeTemplate(proto.Message): + r"""A template that specifies runtime configurations such as + machine type, runtime version, network configurations, etc. + Multiple runtimes can be created from a runtime template. + + Attributes: + name (str): + Output only. The resource name of the + NotebookRuntimeTemplate. + display_name (str): + Required. The display name of the + NotebookRuntimeTemplate. The name can be up to + 128 characters long and can consist of any UTF-8 + characters. + description (str): + The description of the + NotebookRuntimeTemplate. + is_default (bool): + Output only. The default template to use if + not specified. + machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): + Optional. Immutable. 
The specification of a + single machine for the template. + data_persistent_disk_spec (google.cloud.aiplatform_v1.types.PersistentDiskSpec): + Optional. The specification of [persistent + disk][https://cloud.google.com/compute/docs/disks/persistent-disks] + attached to the runtime as data disk storage. + network_spec (google.cloud.aiplatform_v1.types.NetworkSpec): + Optional. Network spec. + service_account (str): + The service account that the runtime workload runs as. You + can use any service account within the same project, but you + must have the service account user permission to use the + instance. + + If not specified, the `Compute Engine default service + account `__ + is used. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (MutableMapping[str, str]): + The labels with user-defined metadata to + organize the NotebookRuntimeTemplates. + + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + + See https://goo.gl/xmQnxf for more information + and examples of labels. + idle_shutdown_config (google.cloud.aiplatform_v1.types.NotebookIdleShutdownConfig): + The idle shutdown configuration of + NotebookRuntimeTemplate. This config will only + be set when idle shutdown is enabled. + euc_config (google.cloud.aiplatform_v1.types.NotebookEucConfig): + EUC configuration of the + NotebookRuntimeTemplate. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + NotebookRuntimeTemplate was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + NotebookRuntimeTemplate was most recently + updated. + notebook_runtime_type (google.cloud.aiplatform_v1.types.NotebookRuntimeType): + Optional. Immutable. The type of the notebook + runtime template. 
+ shielded_vm_config (google.cloud.aiplatform_v1.types.ShieldedVmConfig): + Optional. Immutable. Runtime Shielded VM + spec. + network_tags (MutableSequence[str]): + Optional. The Compute Engine tags to add to runtime (see + `Tagging + instances `__). + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + description: str = proto.Field( + proto.STRING, + number=3, + ) + is_default: bool = proto.Field( + proto.BOOL, + number=4, + ) + machine_spec: machine_resources.MachineSpec = proto.Field( + proto.MESSAGE, + number=5, + message=machine_resources.MachineSpec, + ) + data_persistent_disk_spec: machine_resources.PersistentDiskSpec = proto.Field( + proto.MESSAGE, + number=8, + message=machine_resources.PersistentDiskSpec, + ) + network_spec: gca_network_spec.NetworkSpec = proto.Field( + proto.MESSAGE, + number=12, + message=gca_network_spec.NetworkSpec, + ) + service_account: str = proto.Field( + proto.STRING, + number=13, + ) + etag: str = proto.Field( + proto.STRING, + number=14, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=15, + ) + idle_shutdown_config: notebook_idle_shutdown_config.NotebookIdleShutdownConfig = ( + proto.Field( + proto.MESSAGE, + number=17, + message=notebook_idle_shutdown_config.NotebookIdleShutdownConfig, + ) + ) + euc_config: notebook_euc_config.NotebookEucConfig = proto.Field( + proto.MESSAGE, + number=18, + message=notebook_euc_config.NotebookEucConfig, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + notebook_runtime_type: "NotebookRuntimeType" = proto.Field( + proto.ENUM, + number=19, + enum="NotebookRuntimeType", + ) + shielded_vm_config: machine_resources.ShieldedVmConfig = proto.Field( + proto.MESSAGE, + 
number=20, + message=machine_resources.ShieldedVmConfig, + ) + network_tags: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=21, + ) + + +class NotebookRuntime(proto.Message): + r"""A runtime is a virtual machine allocated to a particular user + for a particular Notebook file on temporary basis with lifetime + limited to 24 hours. + + Attributes: + name (str): + Output only. The resource name of the + NotebookRuntime. + runtime_user (str): + Required. The user email of the + NotebookRuntime. + notebook_runtime_template_ref (google.cloud.aiplatform_v1.types.NotebookRuntimeTemplateRef): + Output only. The pointer to + NotebookRuntimeTemplate this NotebookRuntime is + created from. + proxy_uri (str): + Output only. The proxy endpoint used to + access the NotebookRuntime. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + NotebookRuntime was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + NotebookRuntime was most recently updated. + health_state (google.cloud.aiplatform_v1.types.NotebookRuntime.HealthState): + Output only. The health state of the + NotebookRuntime. + display_name (str): + Required. The display name of the + NotebookRuntime. The name can be up to 128 + characters long and can consist of any UTF-8 + characters. + description (str): + The description of the NotebookRuntime. + service_account (str): + Output only. The service account that the + NotebookRuntime workload runs as. + runtime_state (google.cloud.aiplatform_v1.types.NotebookRuntime.RuntimeState): + Output only. The runtime (instance) state of + the NotebookRuntime. + is_upgradable (bool): + Output only. Whether NotebookRuntime is + upgradable. + labels (MutableMapping[str, str]): + The labels with user-defined metadata to organize your + NotebookRuntime. 
+ + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one NotebookRuntime (System labels are + excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for NotebookRuntime: + + - "aiplatform.googleapis.com/notebook_runtime_gce_instance_id": + output only, its value is the Compute Engine instance id. + - "aiplatform.googleapis.com/colab_enterprise_entry_service": + its value is either "bigquery" or "vertex"; if absent, it + should be "vertex". This is to describe the entry + service, either BigQuery or Vertex. + expiration_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + NotebookRuntime will be expired: + + 1. System Predefined NotebookRuntime: 24 hours + after creation. After expiration, system + predifined runtime will be deleted. + 2. User created NotebookRuntime: 6 months after + last upgrade. After expiration, user created + runtime will be stopped and allowed for + upgrade. + version (str): + Output only. The VM os image version of + NotebookRuntime. + notebook_runtime_type (google.cloud.aiplatform_v1.types.NotebookRuntimeType): + Output only. The type of the notebook + runtime. + network_tags (MutableSequence[str]): + Optional. The Compute Engine tags to add to runtime (see + `Tagging + instances `__). + """ + + class HealthState(proto.Enum): + r"""The substate of the NotebookRuntime to display health + information. + + Values: + HEALTH_STATE_UNSPECIFIED (0): + Unspecified health state. + HEALTHY (1): + NotebookRuntime is in healthy state. Applies + to ACTIVE state. + UNHEALTHY (2): + NotebookRuntime is in unhealthy state. + Applies to ACTIVE state. 
+ """ + HEALTH_STATE_UNSPECIFIED = 0 + HEALTHY = 1 + UNHEALTHY = 2 + + class RuntimeState(proto.Enum): + r"""The substate of the NotebookRuntime to display state of + runtime. The resource of NotebookRuntime is in ACTIVE state for + these sub state. + + Values: + RUNTIME_STATE_UNSPECIFIED (0): + Unspecified runtime state. + RUNNING (1): + NotebookRuntime is in running state. + BEING_STARTED (2): + NotebookRuntime is in starting state. + BEING_STOPPED (3): + NotebookRuntime is in stopping state. + STOPPED (4): + NotebookRuntime is in stopped state. + BEING_UPGRADED (5): + NotebookRuntime is in upgrading state. It is + in the middle of upgrading process. + ERROR (100): + NotebookRuntime was unable to start/stop + properly. + INVALID (101): + NotebookRuntime is in invalid state. Cannot + be recovered. + """ + RUNTIME_STATE_UNSPECIFIED = 0 + RUNNING = 1 + BEING_STARTED = 2 + BEING_STOPPED = 3 + STOPPED = 4 + BEING_UPGRADED = 5 + ERROR = 100 + INVALID = 101 + + name: str = proto.Field( + proto.STRING, + number=1, + ) + runtime_user: str = proto.Field( + proto.STRING, + number=2, + ) + notebook_runtime_template_ref: gca_notebook_runtime_template_ref.NotebookRuntimeTemplateRef = proto.Field( + proto.MESSAGE, + number=3, + message=gca_notebook_runtime_template_ref.NotebookRuntimeTemplateRef, + ) + proxy_uri: str = proto.Field( + proto.STRING, + number=5, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + health_state: HealthState = proto.Field( + proto.ENUM, + number=8, + enum=HealthState, + ) + display_name: str = proto.Field( + proto.STRING, + number=10, + ) + description: str = proto.Field( + proto.STRING, + number=11, + ) + service_account: str = proto.Field( + proto.STRING, + number=13, + ) + runtime_state: RuntimeState = proto.Field( + proto.ENUM, + number=14, + 
enum=RuntimeState, + ) + is_upgradable: bool = proto.Field( + proto.BOOL, + number=15, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=16, + ) + expiration_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=17, + message=timestamp_pb2.Timestamp, + ) + version: str = proto.Field( + proto.STRING, + number=18, + ) + notebook_runtime_type: "NotebookRuntimeType" = proto.Field( + proto.ENUM, + number=19, + enum="NotebookRuntimeType", + ) + network_tags: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=25, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/notebook_runtime_template_ref.py b/google/cloud/aiplatform_v1/types/notebook_runtime_template_ref.py new file mode 100644 index 0000000000..38646b13cf --- /dev/null +++ b/google/cloud/aiplatform_v1/types/notebook_runtime_template_ref.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "NotebookRuntimeTemplateRef", + }, +) + + +class NotebookRuntimeTemplateRef(proto.Message): + r"""Points to a NotebookRuntimeTemplateRef. + + Attributes: + notebook_runtime_template (str): + Immutable. 
A resource name of the + NotebookRuntimeTemplate. + """ + + notebook_runtime_template: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/notebook_service.py b/google/cloud/aiplatform_v1/types/notebook_service.py new file mode 100644 index 0000000000..175d7583ca --- /dev/null +++ b/google/cloud/aiplatform_v1/types/notebook_service.py @@ -0,0 +1,593 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime +from google.cloud.aiplatform_v1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "CreateNotebookRuntimeTemplateRequest", + "CreateNotebookRuntimeTemplateOperationMetadata", + "GetNotebookRuntimeTemplateRequest", + "ListNotebookRuntimeTemplatesRequest", + "ListNotebookRuntimeTemplatesResponse", + "DeleteNotebookRuntimeTemplateRequest", + "AssignNotebookRuntimeRequest", + "AssignNotebookRuntimeOperationMetadata", + "GetNotebookRuntimeRequest", + "ListNotebookRuntimesRequest", + "ListNotebookRuntimesResponse", + "DeleteNotebookRuntimeRequest", + "UpgradeNotebookRuntimeRequest", + "UpgradeNotebookRuntimeOperationMetadata", + "UpgradeNotebookRuntimeResponse", + "StartNotebookRuntimeRequest", + "StartNotebookRuntimeOperationMetadata", + "StartNotebookRuntimeResponse", + }, +) + + +class CreateNotebookRuntimeTemplateRequest(proto.Message): + r"""Request message for + [NotebookService.CreateNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.CreateNotebookRuntimeTemplate]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + NotebookRuntimeTemplate. Format: + ``projects/{project}/locations/{location}`` + notebook_runtime_template (google.cloud.aiplatform_v1.types.NotebookRuntimeTemplate): + Required. The NotebookRuntimeTemplate to + create. + notebook_runtime_template_id (str): + Optional. User specified ID for the notebook + runtime template. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + notebook_runtime_template: gca_notebook_runtime.NotebookRuntimeTemplate = ( + proto.Field( + proto.MESSAGE, + number=2, + message=gca_notebook_runtime.NotebookRuntimeTemplate, + ) + ) + notebook_runtime_template_id: str = proto.Field( + proto.STRING, + number=3, + ) + + +class CreateNotebookRuntimeTemplateOperationMetadata(proto.Message): + r"""Metadata information for + [NotebookService.CreateNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.CreateNotebookRuntimeTemplate]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetNotebookRuntimeTemplateRequest(proto.Message): + r"""Request message for + [NotebookService.GetNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.GetNotebookRuntimeTemplate] + + Attributes: + name (str): + Required. The name of the NotebookRuntimeTemplate resource. + Format: + ``projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListNotebookRuntimeTemplatesRequest(proto.Message): + r"""Request message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimeTemplates]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the NotebookRuntimeTemplates. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Optional. An expression for filtering the results of the + request. For field names both snake_case and camelCase are + supported. + + - ``notebookRuntimeTemplate`` supports = and !=. 
+ ``notebookRuntimeTemplate`` represents the + NotebookRuntimeTemplate ID, i.e. the last segment of the + NotebookRuntimeTemplate's [resource name] + [google.cloud.aiplatform.v1.NotebookRuntimeTemplate.name]. + - ``display_name`` supports = and != + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. + + - ``notebookRuntimeType`` supports = and !=. + notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. + + Some examples: + + - ``notebookRuntimeTemplate=notebookRuntimeTemplate123`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + - ``notebookRuntimeType=USER_DEFINED`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via + [ListNotebookRuntimeTemplatesResponse.next_page_token][google.cloud.aiplatform.v1.ListNotebookRuntimeTemplatesResponse.next_page_token] + of the previous + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimeTemplates] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Mask specifying which fields to + read. + order_by (str): + Optional. A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + + Example: ``display_name, create_time desc``. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=2, + ) + page_size: int = proto.Field( + proto.INT32, + number=3, + ) + page_token: str = proto.Field( + proto.STRING, + number=4, + ) + read_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListNotebookRuntimeTemplatesResponse(proto.Message): + r"""Response message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimeTemplates]. + + Attributes: + notebook_runtime_templates (MutableSequence[google.cloud.aiplatform_v1.types.NotebookRuntimeTemplate]): + List of NotebookRuntimeTemplates in the + requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListNotebookRuntimeTemplatesRequest.page_token][google.cloud.aiplatform.v1.ListNotebookRuntimeTemplatesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + notebook_runtime_templates: MutableSequence[ + gca_notebook_runtime.NotebookRuntimeTemplate + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_notebook_runtime.NotebookRuntimeTemplate, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteNotebookRuntimeTemplateRequest(proto.Message): + r"""Request message for + [NotebookService.DeleteNotebookRuntimeTemplate][google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntimeTemplate]. + + Attributes: + name (str): + Required. The name of the NotebookRuntimeTemplate resource + to be deleted. 
Format: + ``projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class AssignNotebookRuntimeRequest(proto.Message): + r"""Request message for + [NotebookService.AssignNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.AssignNotebookRuntime]. + + Attributes: + parent (str): + Required. The resource name of the Location to get the + NotebookRuntime assignment. Format: + ``projects/{project}/locations/{location}`` + notebook_runtime_template (str): + Required. The resource name of the + NotebookRuntimeTemplate based on which a + NotebookRuntime will be assigned (reuse or + create a new one). + notebook_runtime (google.cloud.aiplatform_v1.types.NotebookRuntime): + Required. Provide runtime specific + information (e.g. runtime owner, notebook id) + used for NotebookRuntime assignment. + notebook_runtime_id (str): + Optional. User specified ID for the notebook + runtime. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + notebook_runtime_template: str = proto.Field( + proto.STRING, + number=2, + ) + notebook_runtime: gca_notebook_runtime.NotebookRuntime = proto.Field( + proto.MESSAGE, + number=3, + message=gca_notebook_runtime.NotebookRuntime, + ) + notebook_runtime_id: str = proto.Field( + proto.STRING, + number=4, + ) + + +class AssignNotebookRuntimeOperationMetadata(proto.Message): + r"""Metadata information for + [NotebookService.AssignNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.AssignNotebookRuntime]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + progress_message (str): + A human-readable message that shows the + intermediate progress details of + NotebookRuntime. 
+ """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + progress_message: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetNotebookRuntimeRequest(proto.Message): + r"""Request message for + [NotebookService.GetNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.GetNotebookRuntime] + + Attributes: + name (str): + Required. The name of the NotebookRuntime + resource. Instead of checking whether the name + is in valid NotebookRuntime resource name + format, directly throw NotFound exception if + there is no such NotebookRuntime in spanner. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListNotebookRuntimesRequest(proto.Message): + r"""Request message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimes]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the NotebookRuntimes. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Optional. An expression for filtering the results of the + request. For field names both snake_case and camelCase are + supported. + + - ``notebookRuntime`` supports = and !=. + ``notebookRuntime`` represents the NotebookRuntime ID, + i.e. the last segment of the NotebookRuntime's [resource + name] [google.cloud.aiplatform.v1.NotebookRuntime.name]. + - ``displayName`` supports = and != and regex. + - ``notebookRuntimeTemplate`` supports = and !=. + ``notebookRuntimeTemplate`` represents the + NotebookRuntimeTemplate ID, i.e. the last segment of the + NotebookRuntimeTemplate's [resource name] + [google.cloud.aiplatform.v1.NotebookRuntimeTemplate.name]. + - ``healthState`` supports = and !=. healthState enum: + [HEALTHY, UNHEALTHY, HEALTH_STATE_UNSPECIFIED]. + - ``runtimeState`` supports = and !=. 
runtimeState enum: + [RUNTIME_STATE_UNSPECIFIED, RUNNING, BEING_STARTED, + BEING_STOPPED, STOPPED, BEING_UPGRADED, ERROR, INVALID]. + - ``runtimeUser`` supports = and !=. + - API version is UI only: ``uiState`` supports = and !=. + uiState enum: [UI_RESOURCE_STATE_UNSPECIFIED, + UI_RESOURCE_STATE_BEING_CREATED, + UI_RESOURCE_STATE_ACTIVE, + UI_RESOURCE_STATE_BEING_DELETED, + UI_RESOURCE_STATE_CREATION_FAILED]. + - ``notebookRuntimeType`` supports = and !=. + notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. + + Some examples: + + - ``notebookRuntime="notebookRuntime123"`` + - ``displayName="myDisplayName"`` and + ``displayName=~"myDisplayNameRegex"`` + - ``notebookRuntimeTemplate="notebookRuntimeTemplate321"`` + - ``healthState=HEALTHY`` + - ``runtimeState=RUNNING`` + - ``runtimeUser="test@google.com"`` + - ``uiState=UI_RESOURCE_STATE_BEING_DELETED`` + - ``notebookRuntimeType=USER_DEFINED`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via + [ListNotebookRuntimesResponse.next_page_token][google.cloud.aiplatform.v1.ListNotebookRuntimesResponse.next_page_token] + of the previous + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimes] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Mask specifying which fields to + read. + order_by (str): + Optional. A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + + Example: ``display_name, create_time desc``. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=2, + ) + page_size: int = proto.Field( + proto.INT32, + number=3, + ) + page_token: str = proto.Field( + proto.STRING, + number=4, + ) + read_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListNotebookRuntimesResponse(proto.Message): + r"""Response message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimes]. + + Attributes: + notebook_runtimes (MutableSequence[google.cloud.aiplatform_v1.types.NotebookRuntime]): + List of NotebookRuntimes in the requested + page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListNotebookRuntimesRequest.page_token][google.cloud.aiplatform.v1.ListNotebookRuntimesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + notebook_runtimes: MutableSequence[ + gca_notebook_runtime.NotebookRuntime + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_notebook_runtime.NotebookRuntime, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteNotebookRuntimeRequest(proto.Message): + r"""Request message for + [NotebookService.DeleteNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntime]. + + Attributes: + name (str): + Required. The name of the NotebookRuntime + resource to be deleted. Instead of checking + whether the name is in valid NotebookRuntime + resource name format, directly throw NotFound + exception if there is no such NotebookRuntime in + spanner. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UpgradeNotebookRuntimeRequest(proto.Message): + r"""Request message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.UpgradeNotebookRuntime]. + + Attributes: + name (str): + Required. The name of the NotebookRuntime + resource to be upgrade. Instead of checking + whether the name is in valid NotebookRuntime + resource name format, directly throw NotFound + exception if there is no such NotebookRuntime in + spanner. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UpgradeNotebookRuntimeOperationMetadata(proto.Message): + r"""Metadata information for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.UpgradeNotebookRuntime]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + progress_message (str): + A human-readable message that shows the + intermediate progress details of + NotebookRuntime. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + progress_message: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpgradeNotebookRuntimeResponse(proto.Message): + r"""Response message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.UpgradeNotebookRuntime]. + + """ + + +class StartNotebookRuntimeRequest(proto.Message): + r"""Request message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.StartNotebookRuntime]. + + Attributes: + name (str): + Required. The name of the NotebookRuntime + resource to be started. Instead of checking + whether the name is in valid NotebookRuntime + resource name format, directly throw NotFound + exception if there is no such NotebookRuntime in + spanner. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class StartNotebookRuntimeOperationMetadata(proto.Message): + r"""Metadata information for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.StartNotebookRuntime]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + The operation generic information. + progress_message (str): + A human-readable message that shows the + intermediate progress details of + NotebookRuntime. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + progress_message: str = proto.Field( + proto.STRING, + number=2, + ) + + +class StartNotebookRuntimeResponse(proto.Message): + r"""Response message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1.NotebookService.StartNotebookRuntime]. + + """ + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/openapi.py b/google/cloud/aiplatform_v1/types/openapi.py index 266c5ff43b..3213783cb6 100644 --- a/google/cloud/aiplatform_v1/types/openapi.py +++ b/google/cloud/aiplatform_v1/types/openapi.py @@ -73,14 +73,25 @@ class Schema(proto.Message): Optional. The format of the data. Supported formats: - for NUMBER type: float, double - for INTEGER type: int32, int64 + for NUMBER type: "float", "double" + for INTEGER type: "int32", "int64" + for STRING type: "email", "byte", etc + title (str): + Optional. The title of the Schema. description (str): Optional. The description of the data. nullable (bool): Optional. Indicates if the value may be null. + default (google.protobuf.struct_pb2.Value): + Optional. Default value of the data. items (google.cloud.aiplatform_v1.types.Schema): - Optional. Schema of the elements of + Optional. SCHEMA FIELDS FOR TYPE ARRAY + Schema of the elements of Type.ARRAY. + min_items (int): + Optional. 
Minimum number of the elements for + Type.ARRAY. + max_items (int): + Optional. Maximum number of the elements for Type.ARRAY. enum (MutableSequence[str]): Optional. Possible values of the element of Type.STRING with @@ -88,9 +99,31 @@ class Schema(proto.Message): : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} properties (MutableMapping[str, google.cloud.aiplatform_v1.types.Schema]): - Optional. Properties of Type.OBJECT. + Optional. SCHEMA FIELDS FOR TYPE OBJECT + Properties of Type.OBJECT. required (MutableSequence[str]): Optional. Required properties of Type.OBJECT. + min_properties (int): + Optional. Minimum number of the properties + for Type.OBJECT. + max_properties (int): + Optional. Maximum number of the properties + for Type.OBJECT. + minimum (float): + Optional. SCHEMA FIELDS FOR TYPE INTEGER and + NUMBER Minimum value of the Type.INTEGER and + Type.NUMBER + maximum (float): + Optional. Maximum value of the Type.INTEGER + and Type.NUMBER + min_length (int): + Optional. SCHEMA FIELDS FOR TYPE STRING + Minimum length of the Type.STRING + max_length (int): + Optional. Maximum length of the Type.STRING + pattern (str): + Optional. Pattern of the Type.STRING to + restrict a string to a regular expression. example (google.protobuf.struct_pb2.Value): Optional. Example of the object. Will only populated when the object is the root. 
@@ -105,6 +138,10 @@ class Schema(proto.Message): proto.STRING, number=7, ) + title: str = proto.Field( + proto.STRING, + number=24, + ) description: str = proto.Field( proto.STRING, number=8, @@ -113,11 +150,24 @@ class Schema(proto.Message): proto.BOOL, number=6, ) + default: struct_pb2.Value = proto.Field( + proto.MESSAGE, + number=23, + message=struct_pb2.Value, + ) items: "Schema" = proto.Field( proto.MESSAGE, number=2, message="Schema", ) + min_items: int = proto.Field( + proto.INT64, + number=21, + ) + max_items: int = proto.Field( + proto.INT64, + number=22, + ) enum: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=9, @@ -132,6 +182,34 @@ class Schema(proto.Message): proto.STRING, number=5, ) + min_properties: int = proto.Field( + proto.INT64, + number=14, + ) + max_properties: int = proto.Field( + proto.INT64, + number=15, + ) + minimum: float = proto.Field( + proto.DOUBLE, + number=16, + ) + maximum: float = proto.Field( + proto.DOUBLE, + number=17, + ) + min_length: int = proto.Field( + proto.INT64, + number=18, + ) + max_length: int = proto.Field( + proto.INT64, + number=19, + ) + pattern: str = proto.Field( + proto.STRING, + number=20, + ) example: struct_pb2.Value = proto.Field( proto.MESSAGE, number=4, diff --git a/google/cloud/aiplatform_v1/types/persistent_resource.py b/google/cloud/aiplatform_v1/types/persistent_resource.py new file mode 100644 index 0000000000..98fff590ca --- /dev/null +++ b/google/cloud/aiplatform_v1/types/persistent_resource.py @@ -0,0 +1,403 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto  # type: ignore
+
+from google.cloud.aiplatform_v1.types import encryption_spec as gca_encryption_spec
+from google.cloud.aiplatform_v1.types import machine_resources
+from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package="google.cloud.aiplatform.v1",
+    manifest={
+        "PersistentResource",
+        "ResourcePool",
+        "ResourceRuntimeSpec",
+        "RaySpec",
+        "ResourceRuntime",
+        "ServiceAccountSpec",
+    },
+)
+
+
+class PersistentResource(proto.Message):
+    r"""Represents long-lasting resources that are dedicated to users
+    to run custom workloads.
+    A PersistentResource can have multiple node pools and each node
+    pool can have its own machine spec.
+
+    Attributes:
+        name (str):
+            Immutable. Resource name of a
+            PersistentResource.
+        display_name (str):
+            Optional. The display name of the
+            PersistentResource. The name can be up to 128
+            characters long and can consist of any UTF-8
+            characters.
+        resource_pools (MutableSequence[google.cloud.aiplatform_v1.types.ResourcePool]):
+            Required. The spec of the pools of different
+            resources.
+        state (google.cloud.aiplatform_v1.types.PersistentResource.State):
+            Output only. The detailed state of a PersistentResource.
+        error (google.rpc.status_pb2.Status):
+            Output only. Only populated when persistent resource's state
+            is ``STOPPING`` or ``ERROR``.
+        create_time (google.protobuf.timestamp_pb2.Timestamp):
+            Output only.
Time when the PersistentResource + was created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the PersistentResource for the first + time entered the ``RUNNING`` state. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the PersistentResource + was most recently updated. + labels (MutableMapping[str, str]): + Optional. The labels with user-defined + metadata to organize PersistentResource. + + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + + See https://goo.gl/xmQnxf for more information + and examples of labels. + network (str): + Optional. The full name of the Compute Engine + `network `__ + to peered with Vertex AI to host the persistent resources. + For example, ``projects/12345/global/networks/myVPC``. + `Format `__ + is of the form + ``projects/{project}/global/networks/{network}``. Where + {project} is a project number, as in ``12345``, and + {network} is a network name. + + To specify this field, you must have already `configured VPC + Network Peering for Vertex + AI `__. + + If this field is left unspecified, the resources aren't + peered with any network. + encryption_spec (google.cloud.aiplatform_v1.types.EncryptionSpec): + Optional. Customer-managed encryption key + spec for a PersistentResource. If set, this + PersistentResource and all sub-resources of this + PersistentResource will be secured by this key. + resource_runtime_spec (google.cloud.aiplatform_v1.types.ResourceRuntimeSpec): + Optional. Persistent Resource runtime spec. + For example, used for Ray cluster configuration. + resource_runtime (google.cloud.aiplatform_v1.types.ResourceRuntime): + Output only. Runtime information of the + Persistent Resource. + reserved_ip_ranges (MutableSequence[str]): + Optional. 
A list of names for the reserved IP ranges under + the VPC network that can be used for this persistent + resource. + + If set, we will deploy the persistent resource within the + provided IP ranges. Otherwise, the persistent resource is + deployed to any IP ranges under the provided VPC network. + + Example: ['vertex-ai-ip-range']. + """ + + class State(proto.Enum): + r"""Describes the PersistentResource state. + + Values: + STATE_UNSPECIFIED (0): + Not set. + PROVISIONING (1): + The PROVISIONING state indicates the + persistent resources is being created. + RUNNING (3): + The RUNNING state indicates the persistent + resource is healthy and fully usable. + STOPPING (4): + The STOPPING state indicates the persistent + resource is being deleted. + ERROR (5): + The ERROR state indicates the persistent resource may be + unusable. Details can be found in the ``error`` field. + REBOOTING (6): + The REBOOTING state indicates the persistent + resource is being rebooted (PR is not available + right now but is expected to be ready again + later). + UPDATING (7): + The UPDATING state indicates the persistent + resource is being updated. 
+ """ + STATE_UNSPECIFIED = 0 + PROVISIONING = 1 + RUNNING = 3 + STOPPING = 4 + ERROR = 5 + REBOOTING = 6 + UPDATING = 7 + + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + resource_pools: MutableSequence["ResourcePool"] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message="ResourcePool", + ) + state: State = proto.Field( + proto.ENUM, + number=5, + enum=State, + ) + error: status_pb2.Status = proto.Field( + proto.MESSAGE, + number=6, + message=status_pb2.Status, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=10, + ) + network: str = proto.Field( + proto.STRING, + number=11, + ) + encryption_spec: gca_encryption_spec.EncryptionSpec = proto.Field( + proto.MESSAGE, + number=12, + message=gca_encryption_spec.EncryptionSpec, + ) + resource_runtime_spec: "ResourceRuntimeSpec" = proto.Field( + proto.MESSAGE, + number=13, + message="ResourceRuntimeSpec", + ) + resource_runtime: "ResourceRuntime" = proto.Field( + proto.MESSAGE, + number=14, + message="ResourceRuntime", + ) + reserved_ip_ranges: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=15, + ) + + +class ResourcePool(proto.Message): + r"""Represents the spec of a group of resources of the same type, + for example machine type, disk, and accelerators, in a + PersistentResource. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + id (str): + Immutable. 
The unique ID in a + PersistentResource for referring to this + resource pool. User can specify it if necessary. + Otherwise, it's generated automatically. + machine_spec (google.cloud.aiplatform_v1.types.MachineSpec): + Required. Immutable. The specification of a + single machine. + replica_count (int): + Optional. The total number of machines to use + for this resource pool. + + This field is a member of `oneof`_ ``_replica_count``. + disk_spec (google.cloud.aiplatform_v1.types.DiskSpec): + Optional. Disk spec for the machine in this + node pool. + used_replica_count (int): + Output only. The number of machines currently in use by + training jobs for this resource pool. Will replace + idle_replica_count. + autoscaling_spec (google.cloud.aiplatform_v1.types.ResourcePool.AutoscalingSpec): + Optional. Optional spec to configure GKE + autoscaling + """ + + class AutoscalingSpec(proto.Message): + r"""The min/max number of replicas allowed if enabling + autoscaling + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + min_replica_count (int): + Optional. min replicas in the node pool, must be ≤ + replica_count and < max_replica_count or will throw error + + This field is a member of `oneof`_ ``_min_replica_count``. + max_replica_count (int): + Optional. max replicas in the node pool, must be ≥ + replica_count and > min_replica_count or will throw error + + This field is a member of `oneof`_ ``_max_replica_count``. 
+ """ + + min_replica_count: int = proto.Field( + proto.INT64, + number=1, + optional=True, + ) + max_replica_count: int = proto.Field( + proto.INT64, + number=2, + optional=True, + ) + + id: str = proto.Field( + proto.STRING, + number=1, + ) + machine_spec: machine_resources.MachineSpec = proto.Field( + proto.MESSAGE, + number=2, + message=machine_resources.MachineSpec, + ) + replica_count: int = proto.Field( + proto.INT64, + number=3, + optional=True, + ) + disk_spec: machine_resources.DiskSpec = proto.Field( + proto.MESSAGE, + number=4, + message=machine_resources.DiskSpec, + ) + used_replica_count: int = proto.Field( + proto.INT64, + number=6, + ) + autoscaling_spec: AutoscalingSpec = proto.Field( + proto.MESSAGE, + number=7, + message=AutoscalingSpec, + ) + + +class ResourceRuntimeSpec(proto.Message): + r"""Configuration for the runtime on a PersistentResource instance, + including but not limited to: + + - Service accounts used to run the workloads. + - Whether to make it a dedicated Ray Cluster. + + Attributes: + service_account_spec (google.cloud.aiplatform_v1.types.ServiceAccountSpec): + Optional. Configure the use of workload + identity on the PersistentResource + ray_spec (google.cloud.aiplatform_v1.types.RaySpec): + Optional. Ray cluster configuration. + Required when creating a dedicated RayCluster on + the PersistentResource. + """ + + service_account_spec: "ServiceAccountSpec" = proto.Field( + proto.MESSAGE, + number=2, + message="ServiceAccountSpec", + ) + ray_spec: "RaySpec" = proto.Field( + proto.MESSAGE, + number=1, + message="RaySpec", + ) + + +class RaySpec(proto.Message): + r"""Configuration information for the Ray cluster. + For experimental launch, Ray cluster creation and Persistent + cluster creation are 1:1 mapping: We will provision all the + nodes within the Persistent cluster as Ray nodes. 
+ + """ + + +class ResourceRuntime(proto.Message): + r"""Persistent Cluster runtime information as output""" + + +class ServiceAccountSpec(proto.Message): + r"""Configuration for the use of custom service account to run + the workloads. + + Attributes: + enable_custom_service_account (bool): + Required. If true, custom user-managed service account is + enforced to run any workloads (for example, Vertex Jobs) on + the resource. Otherwise, uses the `Vertex AI Custom Code + Service + Agent `__. + service_account (str): + Optional. Required when all below conditions are met + + - ``enable_custom_service_account`` is true; + - any runtime is specified via ``ResourceRuntimeSpec`` on + creation time, for example, Ray + + The users must have ``iam.serviceAccounts.actAs`` permission + on this service account and then the specified runtime + containers will run as it. + + Do not set this field if you want to submit jobs using + custom service account to this PersistentResource after + creation, but only specify the ``service_account`` inside + the job. + """ + + enable_custom_service_account: bool = proto.Field( + proto.BOOL, + number=1, + ) + service_account: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/persistent_resource_service.py b/google/cloud/aiplatform_v1/types/persistent_resource_service.py new file mode 100644 index 0000000000..156f0604e0 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/persistent_resource_service.py @@ -0,0 +1,285 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import operation +from google.cloud.aiplatform_v1.types import ( + persistent_resource as gca_persistent_resource, +) +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "CreatePersistentResourceRequest", + "CreatePersistentResourceOperationMetadata", + "UpdatePersistentResourceOperationMetadata", + "RebootPersistentResourceOperationMetadata", + "GetPersistentResourceRequest", + "ListPersistentResourcesRequest", + "ListPersistentResourcesResponse", + "DeletePersistentResourceRequest", + "UpdatePersistentResourceRequest", + "RebootPersistentResourceRequest", + }, +) + + +class CreatePersistentResourceRequest(proto.Message): + r"""Request message for + [PersistentResourceService.CreatePersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.CreatePersistentResource]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + PersistentResource in. Format: + ``projects/{project}/locations/{location}`` + persistent_resource (google.cloud.aiplatform_v1.types.PersistentResource): + Required. The PersistentResource to create. + persistent_resource_id (str): + Required. The ID to use for the PersistentResource, which + become the final component of the PersistentResource's + resource name. 
+ + The maximum length is 63 characters, and valid characters + are ``/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/``. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + persistent_resource: gca_persistent_resource.PersistentResource = proto.Field( + proto.MESSAGE, + number=2, + message=gca_persistent_resource.PersistentResource, + ) + persistent_resource_id: str = proto.Field( + proto.STRING, + number=3, + ) + + +class CreatePersistentResourceOperationMetadata(proto.Message): + r"""Details of operations that perform create PersistentResource. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for PersistentResource. + progress_message (str): + Progress Message for Create LRO + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + progress_message: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpdatePersistentResourceOperationMetadata(proto.Message): + r"""Details of operations that perform update PersistentResource. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for PersistentResource. + progress_message (str): + Progress Message for Update LRO + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + progress_message: str = proto.Field( + proto.STRING, + number=2, + ) + + +class RebootPersistentResourceOperationMetadata(proto.Message): + r"""Details of operations that perform reboot PersistentResource. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1.types.GenericOperationMetadata): + Operation metadata for PersistentResource. 
+ progress_message (str): + Progress Message for Reboot LRO + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + progress_message: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetPersistentResourceRequest(proto.Message): + r"""Request message for + [PersistentResourceService.GetPersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.GetPersistentResource]. + + Attributes: + name (str): + Required. The name of the PersistentResource resource. + Format: + ``projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListPersistentResourcesRequest(proto.Message): + r"""Request message for + [PersistentResourceService.ListPersistentResource][]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + PersistentResources from. Format: + ``projects/{project}/locations/{location}`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via [ListPersistentResourceResponse.next_page_token][] of + the previous + [PersistentResourceService.ListPersistentResource][] call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=3, + ) + page_token: str = proto.Field( + proto.STRING, + number=4, + ) + + +class ListPersistentResourcesResponse(proto.Message): + r"""Response message for + [PersistentResourceService.ListPersistentResources][google.cloud.aiplatform.v1.PersistentResourceService.ListPersistentResources] + + Attributes: + persistent_resources (MutableSequence[google.cloud.aiplatform_v1.types.PersistentResource]): + + next_page_token (str): + A token to retrieve next page of results. 
Pass to + [ListPersistentResourcesRequest.page_token][google.cloud.aiplatform.v1.ListPersistentResourcesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + persistent_resources: MutableSequence[ + gca_persistent_resource.PersistentResource + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_persistent_resource.PersistentResource, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeletePersistentResourceRequest(proto.Message): + r"""Request message for + [PersistentResourceService.DeletePersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.DeletePersistentResource]. + + Attributes: + name (str): + Required. The name of the PersistentResource to be deleted. + Format: + ``projects/{project}/locations/{location}/persistentResources/{persistent_resource}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UpdatePersistentResourceRequest(proto.Message): + r"""Request message for UpdatePersistentResource method. + + Attributes: + persistent_resource (google.cloud.aiplatform_v1.types.PersistentResource): + Required. The PersistentResource to update. + + The PersistentResource's ``name`` field is used to identify + the PersistentResource to update. Format: + ``projects/{project}/locations/{location}/persistentResources/{persistent_resource}`` + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Specify the fields to be + overwritten in the PersistentResource by the + update method. 
+ """ + + persistent_resource: gca_persistent_resource.PersistentResource = proto.Field( + proto.MESSAGE, + number=1, + message=gca_persistent_resource.PersistentResource, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class RebootPersistentResourceRequest(proto.Message): + r"""Request message for + [PersistentResourceService.RebootPersistentResource][google.cloud.aiplatform.v1.PersistentResourceService.RebootPersistentResource]. + + Attributes: + name (str): + Required. The name of the PersistentResource resource. + Format: + ``projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1/types/prediction_service.py b/google/cloud/aiplatform_v1/types/prediction_service.py index 5b958c4d9b..440e190e7e 100644 --- a/google/cloud/aiplatform_v1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1/types/prediction_service.py @@ -740,6 +740,8 @@ class CountTokensResponse(proto.Message): class GenerateContentRequest(proto.Message): r"""Request message for [PredictionService.GenerateContent]. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: model (str): Required. The name of the publisher model requested to serve @@ -752,6 +754,13 @@ class GenerateContentRequest(proto.Message): instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request. + system_instruction (google.cloud.aiplatform_v1.types.Content): + Optional. The user provided system + instructions for the model. Note: only text + should be used in parts and content in each part + will be in a separate paragraph. + + This field is a member of `oneof`_ ``_system_instruction``. 
tools (MutableSequence[google.cloud.aiplatform_v1.types.Tool]): Optional. A list of ``Tools`` the model may use to generate the next response. @@ -776,6 +785,12 @@ class GenerateContentRequest(proto.Message): number=2, message=content.Content, ) + system_instruction: content.Content = proto.Field( + proto.MESSAGE, + number=8, + optional=True, + message=content.Content, + ) tools: MutableSequence[tool.Tool] = proto.RepeatedField( proto.MESSAGE, number=6, @@ -831,10 +846,17 @@ class BlockedReason(proto.Enum): Candidates blocked due to safety. OTHER (2): Candidates blocked due to other reason. + BLOCKLIST (3): + Candidates blocked due to the terms which are + included from the terminology blocklist. + PROHIBITED_CONTENT (4): + Candidates blocked due to prohibited content. """ BLOCKED_REASON_UNSPECIFIED = 0 SAFETY = 1 OTHER = 2 + BLOCKLIST = 3 + PROHIBITED_CONTENT = 4 block_reason: "GenerateContentResponse.PromptFeedback.BlockedReason" = ( proto.Field( diff --git a/google/cloud/aiplatform_v1/types/tool.py b/google/cloud/aiplatform_v1/types/tool.py index 42b322f875..5b953e0962 100644 --- a/google/cloud/aiplatform_v1/types/tool.py +++ b/google/cloud/aiplatform_v1/types/tool.py @@ -99,8 +99,8 @@ class FunctionDeclaration(proto.Message): name (str): Required. The name of the function to call. Must start with a letter or an underscore. - Must be a-z, A-Z, 0-9, or contain underscores - and dashes, with a maximum length of 64. + Must be a-z, A-Z, 0-9, or contain underscores, + dots and dashes, with a maximum length of 64. description (str): Optional. Description and purpose of the function. Model uses it to decide how and @@ -113,8 +113,11 @@ class FunctionDeclaration(proto.Message): case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left - unset. Example with 1 required and 1 optional - parameter: type: OBJECT properties: + unset. 
Parameter names must start with a letter + or an underscore and must only contain chars + a-z, A-Z, 0-9, or underscores with a maximum + length of 64. Example with 1 required and 1 + optional parameter: type: OBJECT properties: param1: @@ -233,10 +236,9 @@ class VertexAISearch(proto.Message): Attributes: datastore (str): - Required. Fully-qualified Vertex AI Search's - datastore resource ID. Format: - - projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore} + Required. Fully-qualified Vertex AI Search's datastore + resource ID. Format: + ``projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`` """ datastore: str = proto.Field( diff --git a/google/cloud/aiplatform_v1/types/tuning_job.py b/google/cloud/aiplatform_v1/types/tuning_job.py new file mode 100644 index 0000000000..f31c463c30 --- /dev/null +++ b/google/cloud/aiplatform_v1/types/tuning_job.py @@ -0,0 +1,474 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1.types import content +from google.cloud.aiplatform_v1.types import job_state +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1", + manifest={ + "TuningJob", + "TunedModel", + "SupervisedTuningDatasetDistribution", + "SupervisedTuningDataStats", + "TuningDataStats", + "SupervisedHyperParameters", + "SupervisedTuningSpec", + }, +) + + +class TuningJob(proto.Message): + r"""Represents a TuningJob that runs with Google owned models. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + base_model (str): + Model name for tuning, e.g., + "gemini-1.0-pro-002". + + This field is a member of `oneof`_ ``source_model``. + supervised_tuning_spec (google.cloud.aiplatform_v1.types.SupervisedTuningSpec): + Tuning Spec for Supervised Fine Tuning. + + This field is a member of `oneof`_ ``tuning_spec``. + name (str): + Output only. Identifier. Resource name of a TuningJob. + Format: + ``projects/{project}/locations/{location}/tuningJobs/{tuning_job}`` + tuned_model_display_name (str): + Optional. The display name of the + [TunedModel][google.cloud.aiplatform.v1.Model]. The name can + be up to 128 characters long and can consist of any UTF-8 + characters. + description (str): + Optional. The description of the + [TuningJob][google.cloud.aiplatform.v1.TuningJob]. + state (google.cloud.aiplatform_v1.types.JobState): + Output only. The detailed state of the job. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the + [TuningJob][google.cloud.aiplatform.v1.TuningJob] was + created. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. 
Time when the + [TuningJob][google.cloud.aiplatform.v1.TuningJob] for the + first time entered the ``JOB_STATE_RUNNING`` state. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the TuningJob entered any of the + following [JobStates][google.cloud.aiplatform.v1.JobState]: + ``JOB_STATE_SUCCEEDED``, ``JOB_STATE_FAILED``, + ``JOB_STATE_CANCELLED``, ``JOB_STATE_EXPIRED``. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Time when the + [TuningJob][google.cloud.aiplatform.v1.TuningJob] was most + recently updated. + error (google.rpc.status_pb2.Status): + Output only. Only populated when job's state is + ``JOB_STATE_FAILED`` or ``JOB_STATE_CANCELLED``. + labels (MutableMapping[str, str]): + Optional. The labels with user-defined metadata to organize + [TuningJob][google.cloud.aiplatform.v1.TuningJob] and + generated resources such as + [Model][google.cloud.aiplatform.v1.Model] and + [Endpoint][google.cloud.aiplatform.v1.Endpoint]. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. + + See https://goo.gl/xmQnxf for more information and examples + of labels. + experiment (str): + Output only. The Experiment associated with this + [TuningJob][google.cloud.aiplatform.v1.TuningJob]. + tuned_model (google.cloud.aiplatform_v1.types.TunedModel): + Output only. The tuned model resources associated with this + [TuningJob][google.cloud.aiplatform.v1.TuningJob]. + tuning_data_stats (google.cloud.aiplatform_v1.types.TuningDataStats): + Output only. The tuning data statistics associated with this + [TuningJob][google.cloud.aiplatform.v1.TuningJob]. 
+ """ + + base_model: str = proto.Field( + proto.STRING, + number=4, + oneof="source_model", + ) + supervised_tuning_spec: "SupervisedTuningSpec" = proto.Field( + proto.MESSAGE, + number=5, + oneof="tuning_spec", + message="SupervisedTuningSpec", + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + tuned_model_display_name: str = proto.Field( + proto.STRING, + number=2, + ) + description: str = proto.Field( + proto.STRING, + number=3, + ) + state: job_state.JobState = proto.Field( + proto.ENUM, + number=6, + enum=job_state.JobState, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=8, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=9, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + error: status_pb2.Status = proto.Field( + proto.MESSAGE, + number=11, + message=status_pb2.Status, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=12, + ) + experiment: str = proto.Field( + proto.STRING, + number=13, + ) + tuned_model: "TunedModel" = proto.Field( + proto.MESSAGE, + number=14, + message="TunedModel", + ) + tuning_data_stats: "TuningDataStats" = proto.Field( + proto.MESSAGE, + number=15, + message="TuningDataStats", + ) + + +class TunedModel(proto.Message): + r"""The Model Registry Model and Online Prediction Endpoint assiociated + with this [TuningJob][google.cloud.aiplatform.v1.TuningJob]. + + Attributes: + model (str): + Output only. The resource name of the TunedModel. Format: + ``projects/{project}/locations/{location}/models/{model}``. + endpoint (str): + Output only. A resource name of an Endpoint. Format: + ``projects/{project}/locations/{location}/endpoints/{endpoint}``. 
+ """ + + model: str = proto.Field( + proto.STRING, + number=1, + ) + endpoint: str = proto.Field( + proto.STRING, + number=2, + ) + + +class SupervisedTuningDatasetDistribution(proto.Message): + r"""Dataset distribution for Supervised Tuning. + + Attributes: + sum (int): + Output only. Sum of a given population of + values. + min_ (float): + Output only. The minimum of the population + values. + max_ (float): + Output only. The maximum of the population + values. + mean (float): + Output only. The arithmetic mean of the + values in the population. + median (float): + Output only. The median of the values in the + population. + p5 (float): + Output only. The 5th percentile of the values + in the population. + p95 (float): + Output only. The 95th percentile of the + values in the population. + buckets (MutableSequence[google.cloud.aiplatform_v1.types.SupervisedTuningDatasetDistribution.DatasetBucket]): + Output only. Defines the histogram bucket. + """ + + class DatasetBucket(proto.Message): + r"""Dataset bucket used to create a histogram for the + distribution given a population of values. + + Attributes: + count (float): + Output only. Number of values in the bucket. + left (float): + Output only. Left bound of the bucket. + right (float): + Output only. Right bound of the bucket. 
+ """ + + count: float = proto.Field( + proto.DOUBLE, + number=1, + ) + left: float = proto.Field( + proto.DOUBLE, + number=2, + ) + right: float = proto.Field( + proto.DOUBLE, + number=3, + ) + + sum: int = proto.Field( + proto.INT64, + number=1, + ) + min_: float = proto.Field( + proto.DOUBLE, + number=2, + ) + max_: float = proto.Field( + proto.DOUBLE, + number=3, + ) + mean: float = proto.Field( + proto.DOUBLE, + number=4, + ) + median: float = proto.Field( + proto.DOUBLE, + number=5, + ) + p5: float = proto.Field( + proto.DOUBLE, + number=6, + ) + p95: float = proto.Field( + proto.DOUBLE, + number=7, + ) + buckets: MutableSequence[DatasetBucket] = proto.RepeatedField( + proto.MESSAGE, + number=8, + message=DatasetBucket, + ) + + +class SupervisedTuningDataStats(proto.Message): + r"""Tuning data statistics for Supervised Tuning. + + Attributes: + tuning_dataset_example_count (int): + Output only. Number of examples in the tuning + dataset. + total_tuning_character_count (int): + Output only. Number of tuning characters in + the tuning dataset. + total_billable_character_count (int): + Output only. Number of billable characters in + the tuning dataset. + tuning_step_count (int): + Output only. Number of tuning steps for this + Tuning Job. + user_input_token_distribution (google.cloud.aiplatform_v1.types.SupervisedTuningDatasetDistribution): + Output only. Dataset distributions for the + user input tokens. + user_output_token_distribution (google.cloud.aiplatform_v1.types.SupervisedTuningDatasetDistribution): + Output only. Dataset distributions for the + user output tokens. + user_message_per_example_distribution (google.cloud.aiplatform_v1.types.SupervisedTuningDatasetDistribution): + Output only. Dataset distributions for the + messages per example. + user_dataset_examples (MutableSequence[google.cloud.aiplatform_v1.types.Content]): + Output only. Sample user messages in the + training dataset uri. 
+ """ + + tuning_dataset_example_count: int = proto.Field( + proto.INT64, + number=1, + ) + total_tuning_character_count: int = proto.Field( + proto.INT64, + number=2, + ) + total_billable_character_count: int = proto.Field( + proto.INT64, + number=3, + ) + tuning_step_count: int = proto.Field( + proto.INT64, + number=4, + ) + user_input_token_distribution: "SupervisedTuningDatasetDistribution" = proto.Field( + proto.MESSAGE, + number=5, + message="SupervisedTuningDatasetDistribution", + ) + user_output_token_distribution: "SupervisedTuningDatasetDistribution" = proto.Field( + proto.MESSAGE, + number=6, + message="SupervisedTuningDatasetDistribution", + ) + user_message_per_example_distribution: "SupervisedTuningDatasetDistribution" = ( + proto.Field( + proto.MESSAGE, + number=7, + message="SupervisedTuningDatasetDistribution", + ) + ) + user_dataset_examples: MutableSequence[content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=8, + message=content.Content, + ) + + +class TuningDataStats(proto.Message): + r"""The tuning data statistic values for + [TuningJob][google.cloud.aiplatform.v1.TuningJob]. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + supervised_tuning_data_stats (google.cloud.aiplatform_v1.types.SupervisedTuningDataStats): + The SFT Tuning data stats. + + This field is a member of `oneof`_ ``tuning_data_stats``. + """ + + supervised_tuning_data_stats: "SupervisedTuningDataStats" = proto.Field( + proto.MESSAGE, + number=1, + oneof="tuning_data_stats", + message="SupervisedTuningDataStats", + ) + + +class SupervisedHyperParameters(proto.Message): + r"""Hyperparameters for SFT. + + Attributes: + epoch_count (int): + Optional. Number of training epoches for this + tuning job. + learning_rate_multiplier (float): + Optional. Learning rate multiplier for + tuning. + adapter_size (google.cloud.aiplatform_v1.types.SupervisedHyperParameters.AdapterSize): + Optional. 
Adapter size for tuning. + """ + + class AdapterSize(proto.Enum): + r"""Supported adapter sizes for tuning. + + Values: + ADAPTER_SIZE_UNSPECIFIED (0): + Adapter size is unspecified. + ADAPTER_SIZE_ONE (1): + Adapter size 1. + ADAPTER_SIZE_FOUR (2): + Adapter size 4. + ADAPTER_SIZE_EIGHT (3): + Adapter size 8. + ADAPTER_SIZE_SIXTEEN (4): + Adapter size 16. + """ + ADAPTER_SIZE_UNSPECIFIED = 0 + ADAPTER_SIZE_ONE = 1 + ADAPTER_SIZE_FOUR = 2 + ADAPTER_SIZE_EIGHT = 3 + ADAPTER_SIZE_SIXTEEN = 4 + + epoch_count: int = proto.Field( + proto.INT64, + number=1, + ) + learning_rate_multiplier: float = proto.Field( + proto.DOUBLE, + number=2, + ) + adapter_size: AdapterSize = proto.Field( + proto.ENUM, + number=3, + enum=AdapterSize, + ) + + +class SupervisedTuningSpec(proto.Message): + r"""Tuning Spec for Supervised Tuning. + + Attributes: + training_dataset_uri (str): + Required. Cloud Storage path to file + containing training dataset for tuning. + validation_dataset_uri (str): + Optional. Cloud Storage path to file + containing validation dataset for tuning. + hyper_parameters (google.cloud.aiplatform_v1.types.SupervisedHyperParameters): + Optional. Hyperparameters for SFT. 
+ """ + + training_dataset_uri: str = proto.Field( + proto.STRING, + number=1, + ) + validation_dataset_uri: str = proto.Field( + proto.STRING, + number=2, + ) + hyper_parameters: "SupervisedHyperParameters" = proto.Field( + proto.MESSAGE, + number=3, + message="SupervisedHyperParameters", + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/__init__.py b/google/cloud/aiplatform_v1beta1/__init__.py index 9e620f4e15..83996e56bb 100644 --- a/google/cloud/aiplatform_v1beta1/__init__.py +++ b/google/cloud/aiplatform_v1beta1/__init__.py @@ -28,6 +28,12 @@ ) from .services.endpoint_service import EndpointServiceClient from .services.endpoint_service import EndpointServiceAsyncClient +from .services.evaluation_service import EvaluationServiceClient +from .services.evaluation_service import EvaluationServiceAsyncClient +from .services.extension_execution_service import ExtensionExecutionServiceClient +from .services.extension_execution_service import ExtensionExecutionServiceAsyncClient +from .services.extension_registry_service import ExtensionRegistryServiceClient +from .services.extension_registry_service import ExtensionRegistryServiceAsyncClient from .services.feature_online_store_admin_service import ( FeatureOnlineStoreAdminServiceClient, ) @@ -64,6 +70,8 @@ from .services.model_garden_service import ModelGardenServiceAsyncClient from .services.model_service import ModelServiceClient from .services.model_service import ModelServiceAsyncClient +from .services.notebook_service import NotebookServiceClient +from .services.notebook_service import NotebookServiceAsyncClient from .services.persistent_resource_service import PersistentResourceServiceClient from .services.persistent_resource_service import PersistentResourceServiceAsyncClient from .services.pipeline_service import PipelineServiceClient @@ -84,6 +92,10 @@ from .services.specialist_pool_service import SpecialistPoolServiceAsyncClient from 
.services.tensorboard_service import TensorboardServiceClient from .services.tensorboard_service import TensorboardServiceAsyncClient +from .services.vertex_rag_data_service import VertexRagDataServiceClient +from .services.vertex_rag_data_service import VertexRagDataServiceAsyncClient +from .services.vertex_rag_service import VertexRagServiceClient +from .services.vertex_rag_service import VertexRagServiceAsyncClient from .services.vizier_service import VizierServiceClient from .services.vizier_service import VizierServiceAsyncClient @@ -199,6 +211,100 @@ from .types.evaluated_annotation import ErrorAnalysisAnnotation from .types.evaluated_annotation import EvaluatedAnnotation from .types.evaluated_annotation import EvaluatedAnnotationExplanation +from .types.evaluation_service import BleuInput +from .types.evaluation_service import BleuInstance +from .types.evaluation_service import BleuMetricValue +from .types.evaluation_service import BleuResults +from .types.evaluation_service import BleuSpec +from .types.evaluation_service import CoherenceInput +from .types.evaluation_service import CoherenceInstance +from .types.evaluation_service import CoherenceResult +from .types.evaluation_service import CoherenceSpec +from .types.evaluation_service import EvaluateInstancesRequest +from .types.evaluation_service import EvaluateInstancesResponse +from .types.evaluation_service import ExactMatchInput +from .types.evaluation_service import ExactMatchInstance +from .types.evaluation_service import ExactMatchMetricValue +from .types.evaluation_service import ExactMatchResults +from .types.evaluation_service import ExactMatchSpec +from .types.evaluation_service import FluencyInput +from .types.evaluation_service import FluencyInstance +from .types.evaluation_service import FluencyResult +from .types.evaluation_service import FluencySpec +from .types.evaluation_service import FulfillmentInput +from .types.evaluation_service import FulfillmentInstance +from 
.types.evaluation_service import FulfillmentResult +from .types.evaluation_service import FulfillmentSpec +from .types.evaluation_service import GroundednessInput +from .types.evaluation_service import GroundednessInstance +from .types.evaluation_service import GroundednessResult +from .types.evaluation_service import GroundednessSpec +from .types.evaluation_service import PairwiseQuestionAnsweringQualityInput +from .types.evaluation_service import PairwiseQuestionAnsweringQualityInstance +from .types.evaluation_service import PairwiseQuestionAnsweringQualityResult +from .types.evaluation_service import PairwiseQuestionAnsweringQualitySpec +from .types.evaluation_service import PairwiseSummarizationQualityInput +from .types.evaluation_service import PairwiseSummarizationQualityInstance +from .types.evaluation_service import PairwiseSummarizationQualityResult +from .types.evaluation_service import PairwiseSummarizationQualitySpec +from .types.evaluation_service import QuestionAnsweringCorrectnessInput +from .types.evaluation_service import QuestionAnsweringCorrectnessInstance +from .types.evaluation_service import QuestionAnsweringCorrectnessResult +from .types.evaluation_service import QuestionAnsweringCorrectnessSpec +from .types.evaluation_service import QuestionAnsweringHelpfulnessInput +from .types.evaluation_service import QuestionAnsweringHelpfulnessInstance +from .types.evaluation_service import QuestionAnsweringHelpfulnessResult +from .types.evaluation_service import QuestionAnsweringHelpfulnessSpec +from .types.evaluation_service import QuestionAnsweringQualityInput +from .types.evaluation_service import QuestionAnsweringQualityInstance +from .types.evaluation_service import QuestionAnsweringQualityResult +from .types.evaluation_service import QuestionAnsweringQualitySpec +from .types.evaluation_service import QuestionAnsweringRelevanceInput +from .types.evaluation_service import QuestionAnsweringRelevanceInstance +from .types.evaluation_service import 
QuestionAnsweringRelevanceResult +from .types.evaluation_service import QuestionAnsweringRelevanceSpec +from .types.evaluation_service import RougeInput +from .types.evaluation_service import RougeInstance +from .types.evaluation_service import RougeMetricValue +from .types.evaluation_service import RougeResults +from .types.evaluation_service import RougeSpec +from .types.evaluation_service import SafetyInput +from .types.evaluation_service import SafetyInstance +from .types.evaluation_service import SafetyResult +from .types.evaluation_service import SafetySpec +from .types.evaluation_service import SummarizationHelpfulnessInput +from .types.evaluation_service import SummarizationHelpfulnessInstance +from .types.evaluation_service import SummarizationHelpfulnessResult +from .types.evaluation_service import SummarizationHelpfulnessSpec +from .types.evaluation_service import SummarizationQualityInput +from .types.evaluation_service import SummarizationQualityInstance +from .types.evaluation_service import SummarizationQualityResult +from .types.evaluation_service import SummarizationQualitySpec +from .types.evaluation_service import SummarizationVerbosityInput +from .types.evaluation_service import SummarizationVerbosityInstance +from .types.evaluation_service import SummarizationVerbosityResult +from .types.evaluation_service import SummarizationVerbositySpec +from .types.evaluation_service import ToolCallValidInput +from .types.evaluation_service import ToolCallValidInstance +from .types.evaluation_service import ToolCallValidMetricValue +from .types.evaluation_service import ToolCallValidResults +from .types.evaluation_service import ToolCallValidSpec +from .types.evaluation_service import ToolNameMatchInput +from .types.evaluation_service import ToolNameMatchInstance +from .types.evaluation_service import ToolNameMatchMetricValue +from .types.evaluation_service import ToolNameMatchResults +from .types.evaluation_service import ToolNameMatchSpec +from 
.types.evaluation_service import ToolParameterKeyMatchInput +from .types.evaluation_service import ToolParameterKeyMatchInstance +from .types.evaluation_service import ToolParameterKeyMatchMetricValue +from .types.evaluation_service import ToolParameterKeyMatchResults +from .types.evaluation_service import ToolParameterKeyMatchSpec +from .types.evaluation_service import ToolParameterKVMatchInput +from .types.evaluation_service import ToolParameterKVMatchInstance +from .types.evaluation_service import ToolParameterKVMatchMetricValue +from .types.evaluation_service import ToolParameterKVMatchResults +from .types.evaluation_service import ToolParameterKVMatchSpec +from .types.evaluation_service import PairwiseChoice from .types.event import Event from .types.execution import Execution from .types.explanation import Attribution @@ -220,6 +326,25 @@ from .types.explanation import SmoothGradConfig from .types.explanation import XraiAttribution from .types.explanation_metadata import ExplanationMetadata +from .types.extension import AuthConfig +from .types.extension import Extension +from .types.extension import ExtensionManifest +from .types.extension import ExtensionOperation +from .types.extension import ExtensionPrivateServiceConnectConfig +from .types.extension import RuntimeConfig +from .types.extension import AuthType +from .types.extension import HttpElementLocation +from .types.extension_execution_service import ExecuteExtensionRequest +from .types.extension_execution_service import ExecuteExtensionResponse +from .types.extension_execution_service import QueryExtensionRequest +from .types.extension_execution_service import QueryExtensionResponse +from .types.extension_registry_service import DeleteExtensionRequest +from .types.extension_registry_service import GetExtensionRequest +from .types.extension_registry_service import ImportExtensionOperationMetadata +from .types.extension_registry_service import ImportExtensionRequest +from 
.types.extension_registry_service import ListExtensionsRequest +from .types.extension_registry_service import ListExtensionsResponse +from .types.extension_registry_service import UpdateExtensionRequest from .types.feature import Feature from .types.feature_group import FeatureGroup from .types.feature_monitoring_stats import FeatureStatsAnomaly @@ -368,8 +493,10 @@ from .types.io import ContainerRegistryDestination from .types.io import CsvDestination from .types.io import CsvSource +from .types.io import DirectUploadSource from .types.io import GcsDestination from .types.io import GcsSource +from .types.io import GoogleDriveSource from .types.io import TFRecordDestination from .types.job_service import CancelBatchPredictionJobRequest from .types.job_service import CancelCustomJobRequest @@ -429,6 +556,7 @@ from .types.machine_resources import NfsMount from .types.machine_resources import PersistentDiskSpec from .types.machine_resources import ResourcesConsumed +from .types.machine_resources import ShieldedVmConfig from .types.manual_batch_tuning_parameters import ManualBatchTuningParameters from .types.match_service import FindNeighborsRequest from .types.match_service import FindNeighborsResponse @@ -564,6 +692,31 @@ from .types.nas_job import NasJobSpec from .types.nas_job import NasTrial from .types.nas_job import NasTrialDetail +from .types.network_spec import NetworkSpec +from .types.notebook_euc_config import NotebookEucConfig +from .types.notebook_idle_shutdown_config import NotebookIdleShutdownConfig +from .types.notebook_runtime import NotebookRuntime +from .types.notebook_runtime import NotebookRuntimeTemplate +from .types.notebook_runtime import NotebookRuntimeType +from .types.notebook_runtime_template_ref import NotebookRuntimeTemplateRef +from .types.notebook_service import AssignNotebookRuntimeOperationMetadata +from .types.notebook_service import AssignNotebookRuntimeRequest +from .types.notebook_service import 
CreateNotebookRuntimeTemplateOperationMetadata +from .types.notebook_service import CreateNotebookRuntimeTemplateRequest +from .types.notebook_service import DeleteNotebookRuntimeRequest +from .types.notebook_service import DeleteNotebookRuntimeTemplateRequest +from .types.notebook_service import GetNotebookRuntimeRequest +from .types.notebook_service import GetNotebookRuntimeTemplateRequest +from .types.notebook_service import ListNotebookRuntimesRequest +from .types.notebook_service import ListNotebookRuntimesResponse +from .types.notebook_service import ListNotebookRuntimeTemplatesRequest +from .types.notebook_service import ListNotebookRuntimeTemplatesResponse +from .types.notebook_service import StartNotebookRuntimeOperationMetadata +from .types.notebook_service import StartNotebookRuntimeRequest +from .types.notebook_service import StartNotebookRuntimeResponse +from .types.notebook_service import UpgradeNotebookRuntimeOperationMetadata +from .types.notebook_service import UpgradeNotebookRuntimeRequest +from .types.notebook_service import UpgradeNotebookRuntimeResponse from .types.openapi import Schema from .types.openapi import Type from .types.operation import DeleteOperationMetadata @@ -581,6 +734,8 @@ from .types.persistent_resource_service import GetPersistentResourceRequest from .types.persistent_resource_service import ListPersistentResourcesRequest from .types.persistent_resource_service import ListPersistentResourcesResponse +from .types.persistent_resource_service import RebootPersistentResourceOperationMetadata +from .types.persistent_resource_service import RebootPersistentResourceRequest from .types.persistent_resource_service import UpdatePersistentResourceOperationMetadata from .types.persistent_resource_service import UpdatePersistentResourceRequest from .types.pipeline_failure_policy import PipelineFailurePolicy @@ -607,6 +762,7 @@ from .types.pipeline_service import ListTrainingPipelinesRequest from .types.pipeline_service import 
ListTrainingPipelinesResponse from .types.pipeline_state import PipelineState +from .types.prediction_service import ChatCompletionsRequest from .types.prediction_service import CountTokensRequest from .types.prediction_service import CountTokensResponse from .types.prediction_service import DirectPredictRequest @@ -730,7 +886,9 @@ from .types.tool import Retrieval from .types.tool import Tool from .types.tool import ToolConfig +from .types.tool import ToolUseExample from .types.tool import VertexAISearch +from .types.tool import VertexRagStore from .types.training_pipeline import FilterSplit from .types.training_pipeline import FractionSplit from .types.training_pipeline import InputDataConfig @@ -746,6 +904,30 @@ from .types.unmanaged_container_model import UnmanagedContainerModel from .types.user_action_reference import UserActionReference from .types.value import Value +from .types.vertex_rag_data import ImportRagFilesConfig +from .types.vertex_rag_data import RagCorpus +from .types.vertex_rag_data import RagFile +from .types.vertex_rag_data import RagFileChunkingConfig +from .types.vertex_rag_data import UploadRagFileConfig +from .types.vertex_rag_data_service import CreateRagCorpusOperationMetadata +from .types.vertex_rag_data_service import CreateRagCorpusRequest +from .types.vertex_rag_data_service import DeleteRagCorpusRequest +from .types.vertex_rag_data_service import DeleteRagFileRequest +from .types.vertex_rag_data_service import GetRagCorpusRequest +from .types.vertex_rag_data_service import GetRagFileRequest +from .types.vertex_rag_data_service import ImportRagFilesOperationMetadata +from .types.vertex_rag_data_service import ImportRagFilesRequest +from .types.vertex_rag_data_service import ImportRagFilesResponse +from .types.vertex_rag_data_service import ListRagCorporaRequest +from .types.vertex_rag_data_service import ListRagCorporaResponse +from .types.vertex_rag_data_service import ListRagFilesRequest +from .types.vertex_rag_data_service import 
ListRagFilesResponse +from .types.vertex_rag_data_service import UploadRagFileRequest +from .types.vertex_rag_data_service import UploadRagFileResponse +from .types.vertex_rag_service import RagContexts +from .types.vertex_rag_service import RagQuery +from .types.vertex_rag_service import RetrieveContextsRequest +from .types.vertex_rag_service import RetrieveContextsResponse from .types.vizier_service import AddTrialMeasurementRequest from .types.vizier_service import CheckTrialEarlyStoppingStateMetatdata from .types.vizier_service import CheckTrialEarlyStoppingStateRequest @@ -773,6 +955,9 @@ "DatasetServiceAsyncClient", "DeploymentResourcePoolServiceAsyncClient", "EndpointServiceAsyncClient", + "EvaluationServiceAsyncClient", + "ExtensionExecutionServiceAsyncClient", + "ExtensionRegistryServiceAsyncClient", "FeatureOnlineStoreAdminServiceAsyncClient", "FeatureOnlineStoreServiceAsyncClient", "FeatureRegistryServiceAsyncClient", @@ -787,6 +972,7 @@ "MigrationServiceAsyncClient", "ModelGardenServiceAsyncClient", "ModelServiceAsyncClient", + "NotebookServiceAsyncClient", "PersistentResourceServiceAsyncClient", "PipelineServiceAsyncClient", "PredictionServiceAsyncClient", @@ -795,6 +981,8 @@ "ScheduleServiceAsyncClient", "SpecialistPoolServiceAsyncClient", "TensorboardServiceAsyncClient", + "VertexRagDataServiceAsyncClient", + "VertexRagServiceAsyncClient", "VizierServiceAsyncClient", "AcceleratorType", "ActiveLearningConfig", @@ -808,7 +996,11 @@ "Annotation", "AnnotationSpec", "Artifact", + "AssignNotebookRuntimeOperationMetadata", + "AssignNotebookRuntimeRequest", "Attribution", + "AuthConfig", + "AuthType", "AutomaticResources", "AutoscalingMetricSpec", "AvroSource", @@ -840,6 +1032,11 @@ "BatchReadTensorboardTimeSeriesDataResponse", "BigQueryDestination", "BigQuerySource", + "BleuInput", + "BleuInstance", + "BleuMetricValue", + "BleuResults", + "BleuSpec", "Blob", "BlurBaselineConfig", "BoolArray", @@ -851,11 +1048,16 @@ "CancelPipelineJobRequest", 
"CancelTrainingPipelineRequest", "Candidate", + "ChatCompletionsRequest", "CheckTrialEarlyStoppingStateMetatdata", "CheckTrialEarlyStoppingStateRequest", "CheckTrialEarlyStoppingStateResponse", "Citation", "CitationMetadata", + "CoherenceInput", + "CoherenceInstance", + "CoherenceResult", + "CoherenceSpec", "CompleteTrialRequest", "CompletionStats", "ComputeTokensRequest", @@ -905,9 +1107,13 @@ "CreateMetadataStoreRequest", "CreateModelDeploymentMonitoringJobRequest", "CreateNasJobRequest", + "CreateNotebookRuntimeTemplateOperationMetadata", + "CreateNotebookRuntimeTemplateRequest", "CreatePersistentResourceOperationMetadata", "CreatePersistentResourceRequest", "CreatePipelineJobRequest", + "CreateRagCorpusOperationMetadata", + "CreateRagCorpusRequest", "CreateReasoningEngineOperationMetadata", "CreateReasoningEngineRequest", "CreateRegistryFeatureOperationMetadata", @@ -944,6 +1150,7 @@ "DeleteEndpointRequest", "DeleteEntityTypeRequest", "DeleteExecutionRequest", + "DeleteExtensionRequest", "DeleteFeatureGroupRequest", "DeleteFeatureOnlineStoreRequest", "DeleteFeatureRequest", @@ -961,9 +1168,13 @@ "DeleteModelRequest", "DeleteModelVersionRequest", "DeleteNasJobRequest", + "DeleteNotebookRuntimeRequest", + "DeleteNotebookRuntimeTemplateRequest", "DeleteOperationMetadata", "DeletePersistentResourceRequest", "DeletePipelineJobRequest", + "DeleteRagCorpusRequest", + "DeleteRagFileRequest", "DeleteReasoningEngineRequest", "DeleteSavedQueryRequest", "DeleteScheduleRequest", @@ -993,6 +1204,7 @@ "DirectPredictResponse", "DirectRawPredictRequest", "DirectRawPredictResponse", + "DirectUploadSource", "DiskSpec", "DoubleArray", "EncryptionSpec", @@ -1002,12 +1214,22 @@ "EntityType", "EnvVar", "ErrorAnalysisAnnotation", + "EvaluateInstancesRequest", + "EvaluateInstancesResponse", "EvaluatedAnnotation", "EvaluatedAnnotationExplanation", + "EvaluationServiceClient", "Event", + "ExactMatchInput", + "ExactMatchInstance", + "ExactMatchMetricValue", + "ExactMatchResults", + 
"ExactMatchSpec", "Examples", "ExamplesOverride", "ExamplesRestrictionsNamespace", + "ExecuteExtensionRequest", + "ExecuteExtensionResponse", "Execution", "ExplainRequest", "ExplainResponse", @@ -1030,6 +1252,12 @@ "ExportModelResponse", "ExportTensorboardTimeSeriesDataRequest", "ExportTensorboardTimeSeriesDataResponse", + "Extension", + "ExtensionExecutionServiceClient", + "ExtensionManifest", + "ExtensionOperation", + "ExtensionPrivateServiceConnectConfig", + "ExtensionRegistryServiceClient", "Feature", "FeatureGroup", "FeatureNoiseSigma", @@ -1056,7 +1284,15 @@ "FilterSplit", "FindNeighborsRequest", "FindNeighborsResponse", + "FluencyInput", + "FluencyInstance", + "FluencyResult", + "FluencySpec", "FractionSplit", + "FulfillmentInput", + "FulfillmentInstance", + "FulfillmentResult", + "FulfillmentSpec", "FunctionCall", "FunctionCallingConfig", "FunctionDeclaration", @@ -1080,6 +1316,7 @@ "GetEndpointRequest", "GetEntityTypeRequest", "GetExecutionRequest", + "GetExtensionRequest", "GetFeatureGroupRequest", "GetFeatureOnlineStoreRequest", "GetFeatureRequest", @@ -1097,9 +1334,13 @@ "GetModelRequest", "GetNasJobRequest", "GetNasTrialDetailRequest", + "GetNotebookRuntimeRequest", + "GetNotebookRuntimeTemplateRequest", "GetPersistentResourceRequest", "GetPipelineJobRequest", "GetPublisherModelRequest", + "GetRagCorpusRequest", + "GetRagFileRequest", "GetReasoningEngineRequest", "GetScheduleRequest", "GetSpecialistPoolRequest", @@ -1110,20 +1351,32 @@ "GetTensorboardTimeSeriesRequest", "GetTrainingPipelineRequest", "GetTrialRequest", + "GoogleDriveSource", "GoogleSearchRetrieval", + "GroundednessInput", + "GroundednessInstance", + "GroundednessResult", + "GroundednessSpec", "GroundingAttribution", "GroundingMetadata", "HarmCategory", + "HttpElementLocation", "HyperparameterTuningJob", "IdMatcher", "ImportDataConfig", "ImportDataOperationMetadata", "ImportDataRequest", "ImportDataResponse", + "ImportExtensionOperationMetadata", + "ImportExtensionRequest", 
"ImportFeatureValuesOperationMetadata", "ImportFeatureValuesRequest", "ImportFeatureValuesResponse", "ImportModelEvaluationRequest", + "ImportRagFilesConfig", + "ImportRagFilesOperationMetadata", + "ImportRagFilesRequest", + "ImportRagFilesResponse", "Index", "IndexDatapoint", "IndexEndpoint", @@ -1164,6 +1417,8 @@ "ListEntityTypesResponse", "ListExecutionsRequest", "ListExecutionsResponse", + "ListExtensionsRequest", + "ListExtensionsResponse", "ListFeatureGroupsRequest", "ListFeatureGroupsResponse", "ListFeatureOnlineStoresRequest", @@ -1200,6 +1455,10 @@ "ListNasJobsResponse", "ListNasTrialDetailsRequest", "ListNasTrialDetailsResponse", + "ListNotebookRuntimeTemplatesRequest", + "ListNotebookRuntimeTemplatesResponse", + "ListNotebookRuntimesRequest", + "ListNotebookRuntimesResponse", "ListOptimalTrialsRequest", "ListOptimalTrialsResponse", "ListPersistentResourcesRequest", @@ -1208,6 +1467,10 @@ "ListPipelineJobsResponse", "ListPublisherModelsRequest", "ListPublisherModelsResponse", + "ListRagCorporaRequest", + "ListRagCorporaResponse", + "ListRagFilesRequest", + "ListRagFilesResponse", "ListReasoningEnginesRequest", "ListReasoningEnginesResponse", "ListSavedQueriesRequest", @@ -1277,7 +1540,24 @@ "NearestNeighborSearchOperationMetadata", "NearestNeighbors", "Neighbor", + "NetworkSpec", "NfsMount", + "NotebookEucConfig", + "NotebookIdleShutdownConfig", + "NotebookRuntime", + "NotebookRuntimeTemplate", + "NotebookRuntimeTemplateRef", + "NotebookRuntimeType", + "NotebookServiceClient", + "PairwiseChoice", + "PairwiseQuestionAnsweringQualityInput", + "PairwiseQuestionAnsweringQualityInstance", + "PairwiseQuestionAnsweringQualityResult", + "PairwiseQuestionAnsweringQualitySpec", + "PairwiseSummarizationQualityInput", + "PairwiseSummarizationQualityInstance", + "PairwiseSummarizationQualityResult", + "PairwiseSummarizationQualitySpec", "Part", "PauseModelDeploymentMonitoringJobRequest", "PauseScheduleRequest", @@ -1321,8 +1601,31 @@ "QueryDeployedModelsRequest", 
"QueryDeployedModelsResponse", "QueryExecutionInputsAndOutputsRequest", + "QueryExtensionRequest", + "QueryExtensionResponse", "QueryReasoningEngineRequest", "QueryReasoningEngineResponse", + "QuestionAnsweringCorrectnessInput", + "QuestionAnsweringCorrectnessInstance", + "QuestionAnsweringCorrectnessResult", + "QuestionAnsweringCorrectnessSpec", + "QuestionAnsweringHelpfulnessInput", + "QuestionAnsweringHelpfulnessInstance", + "QuestionAnsweringHelpfulnessResult", + "QuestionAnsweringHelpfulnessSpec", + "QuestionAnsweringQualityInput", + "QuestionAnsweringQualityInstance", + "QuestionAnsweringQualityResult", + "QuestionAnsweringQualitySpec", + "QuestionAnsweringRelevanceInput", + "QuestionAnsweringRelevanceInstance", + "QuestionAnsweringRelevanceResult", + "QuestionAnsweringRelevanceSpec", + "RagContexts", + "RagCorpus", + "RagFile", + "RagFileChunkingConfig", + "RagQuery", "RawPredictRequest", "RayMetricSpec", "RaySpec", @@ -1342,6 +1645,8 @@ "ReasoningEngineExecutionServiceClient", "ReasoningEngineServiceClient", "ReasoningEngineSpec", + "RebootPersistentResourceOperationMetadata", + "RebootPersistentResourceRequest", "RemoveContextChildrenRequest", "RemoveContextChildrenResponse", "RemoveDatapointsRequest", @@ -1355,8 +1660,20 @@ "ResumeModelDeploymentMonitoringJobRequest", "ResumeScheduleRequest", "Retrieval", + "RetrieveContextsRequest", + "RetrieveContextsResponse", + "RougeInput", + "RougeInstance", + "RougeMetricValue", + "RougeResults", + "RougeSpec", + "RuntimeConfig", + "SafetyInput", + "SafetyInstance", "SafetyRating", + "SafetyResult", "SafetySetting", + "SafetySpec", "SampleConfig", "SampledShapleyAttribution", "SamplingStrategy", @@ -1378,9 +1695,13 @@ "SearchNearestEntitiesResponse", "Segment", "ServiceAccountSpec", + "ShieldedVmConfig", "SmoothGradConfig", "SpecialistPool", "SpecialistPoolServiceClient", + "StartNotebookRuntimeOperationMetadata", + "StartNotebookRuntimeRequest", + "StartNotebookRuntimeResponse", "StopTrialRequest", 
"StratifiedSplit", "StreamDirectPredictRequest", @@ -1401,6 +1722,18 @@ "SuggestTrialsMetadata", "SuggestTrialsRequest", "SuggestTrialsResponse", + "SummarizationHelpfulnessInput", + "SummarizationHelpfulnessInstance", + "SummarizationHelpfulnessResult", + "SummarizationHelpfulnessSpec", + "SummarizationQualityInput", + "SummarizationQualityInstance", + "SummarizationQualityResult", + "SummarizationQualitySpec", + "SummarizationVerbosityInput", + "SummarizationVerbosityInstance", + "SummarizationVerbosityResult", + "SummarizationVerbositySpec", "SyncFeatureViewRequest", "SyncFeatureViewResponse", "TFRecordDestination", @@ -1419,7 +1752,28 @@ "TimestampSplit", "TokensInfo", "Tool", + "ToolCallValidInput", + "ToolCallValidInstance", + "ToolCallValidMetricValue", + "ToolCallValidResults", + "ToolCallValidSpec", "ToolConfig", + "ToolNameMatchInput", + "ToolNameMatchInstance", + "ToolNameMatchMetricValue", + "ToolNameMatchResults", + "ToolNameMatchSpec", + "ToolParameterKVMatchInput", + "ToolParameterKVMatchInstance", + "ToolParameterKVMatchMetricValue", + "ToolParameterKVMatchResults", + "ToolParameterKVMatchSpec", + "ToolParameterKeyMatchInput", + "ToolParameterKeyMatchInstance", + "ToolParameterKeyMatchMetricValue", + "ToolParameterKeyMatchResults", + "ToolParameterKeyMatchSpec", + "ToolUseExample", "TrainingConfig", "TrainingPipeline", "Trial", @@ -1442,6 +1796,7 @@ "UpdateExplanationDatasetOperationMetadata", "UpdateExplanationDatasetRequest", "UpdateExplanationDatasetResponse", + "UpdateExtensionRequest", "UpdateFeatureGroupOperationMetadata", "UpdateFeatureGroupRequest", "UpdateFeatureOnlineStoreOperationMetadata", @@ -1468,14 +1823,23 @@ "UpdateTensorboardRequest", "UpdateTensorboardRunRequest", "UpdateTensorboardTimeSeriesRequest", + "UpgradeNotebookRuntimeOperationMetadata", + "UpgradeNotebookRuntimeRequest", + "UpgradeNotebookRuntimeResponse", "UploadModelOperationMetadata", "UploadModelRequest", "UploadModelResponse", + "UploadRagFileConfig", + 
"UploadRagFileRequest", + "UploadRagFileResponse", "UpsertDatapointsRequest", "UpsertDatapointsResponse", "UserActionReference", "Value", "VertexAISearch", + "VertexRagDataServiceClient", + "VertexRagServiceClient", + "VertexRagStore", "VideoMetadata", "VizierServiceClient", "WorkerPoolSpec", diff --git a/google/cloud/aiplatform_v1beta1/gapic_metadata.json b/google/cloud/aiplatform_v1beta1/gapic_metadata.json index 4e3fa91f72..a659d266cc 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_metadata.json +++ b/google/cloud/aiplatform_v1beta1/gapic_metadata.json @@ -527,6 +527,183 @@ } } }, + "EvaluationService": { + "clients": { + "grpc": { + "libraryClient": "EvaluationServiceClient", + "rpcs": { + "EvaluateInstances": { + "methods": [ + "evaluate_instances" + ] + } + } + }, + "grpc-async": { + "libraryClient": "EvaluationServiceAsyncClient", + "rpcs": { + "EvaluateInstances": { + "methods": [ + "evaluate_instances" + ] + } + } + }, + "rest": { + "libraryClient": "EvaluationServiceClient", + "rpcs": { + "EvaluateInstances": { + "methods": [ + "evaluate_instances" + ] + } + } + } + } + }, + "ExtensionExecutionService": { + "clients": { + "grpc": { + "libraryClient": "ExtensionExecutionServiceClient", + "rpcs": { + "ExecuteExtension": { + "methods": [ + "execute_extension" + ] + }, + "QueryExtension": { + "methods": [ + "query_extension" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ExtensionExecutionServiceAsyncClient", + "rpcs": { + "ExecuteExtension": { + "methods": [ + "execute_extension" + ] + }, + "QueryExtension": { + "methods": [ + "query_extension" + ] + } + } + }, + "rest": { + "libraryClient": "ExtensionExecutionServiceClient", + "rpcs": { + "ExecuteExtension": { + "methods": [ + "execute_extension" + ] + }, + "QueryExtension": { + "methods": [ + "query_extension" + ] + } + } + } + } + }, + "ExtensionRegistryService": { + "clients": { + "grpc": { + "libraryClient": "ExtensionRegistryServiceClient", + "rpcs": { + "DeleteExtension": { + "methods": [ + 
"delete_extension" + ] + }, + "GetExtension": { + "methods": [ + "get_extension" + ] + }, + "ImportExtension": { + "methods": [ + "import_extension" + ] + }, + "ListExtensions": { + "methods": [ + "list_extensions" + ] + }, + "UpdateExtension": { + "methods": [ + "update_extension" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ExtensionRegistryServiceAsyncClient", + "rpcs": { + "DeleteExtension": { + "methods": [ + "delete_extension" + ] + }, + "GetExtension": { + "methods": [ + "get_extension" + ] + }, + "ImportExtension": { + "methods": [ + "import_extension" + ] + }, + "ListExtensions": { + "methods": [ + "list_extensions" + ] + }, + "UpdateExtension": { + "methods": [ + "update_extension" + ] + } + } + }, + "rest": { + "libraryClient": "ExtensionRegistryServiceClient", + "rpcs": { + "DeleteExtension": { + "methods": [ + "delete_extension" + ] + }, + "GetExtension": { + "methods": [ + "get_extension" + ] + }, + "ImportExtension": { + "methods": [ + "import_extension" + ] + }, + "ListExtensions": { + "methods": [ + "list_extensions" + ] + }, + "UpdateExtension": { + "methods": [ + "update_extension" + ] + } + } + } + } + }, "FeatureOnlineStoreAdminService": { "clients": { "grpc": { @@ -3148,6 +3325,175 @@ } } }, + "NotebookService": { + "clients": { + "grpc": { + "libraryClient": "NotebookServiceClient", + "rpcs": { + "AssignNotebookRuntime": { + "methods": [ + "assign_notebook_runtime" + ] + }, + "CreateNotebookRuntimeTemplate": { + "methods": [ + "create_notebook_runtime_template" + ] + }, + "DeleteNotebookRuntime": { + "methods": [ + "delete_notebook_runtime" + ] + }, + "DeleteNotebookRuntimeTemplate": { + "methods": [ + "delete_notebook_runtime_template" + ] + }, + "GetNotebookRuntime": { + "methods": [ + "get_notebook_runtime" + ] + }, + "GetNotebookRuntimeTemplate": { + "methods": [ + "get_notebook_runtime_template" + ] + }, + "ListNotebookRuntimeTemplates": { + "methods": [ + "list_notebook_runtime_templates" + ] + }, + "ListNotebookRuntimes": { + 
"methods": [ + "list_notebook_runtimes" + ] + }, + "StartNotebookRuntime": { + "methods": [ + "start_notebook_runtime" + ] + }, + "UpgradeNotebookRuntime": { + "methods": [ + "upgrade_notebook_runtime" + ] + } + } + }, + "grpc-async": { + "libraryClient": "NotebookServiceAsyncClient", + "rpcs": { + "AssignNotebookRuntime": { + "methods": [ + "assign_notebook_runtime" + ] + }, + "CreateNotebookRuntimeTemplate": { + "methods": [ + "create_notebook_runtime_template" + ] + }, + "DeleteNotebookRuntime": { + "methods": [ + "delete_notebook_runtime" + ] + }, + "DeleteNotebookRuntimeTemplate": { + "methods": [ + "delete_notebook_runtime_template" + ] + }, + "GetNotebookRuntime": { + "methods": [ + "get_notebook_runtime" + ] + }, + "GetNotebookRuntimeTemplate": { + "methods": [ + "get_notebook_runtime_template" + ] + }, + "ListNotebookRuntimeTemplates": { + "methods": [ + "list_notebook_runtime_templates" + ] + }, + "ListNotebookRuntimes": { + "methods": [ + "list_notebook_runtimes" + ] + }, + "StartNotebookRuntime": { + "methods": [ + "start_notebook_runtime" + ] + }, + "UpgradeNotebookRuntime": { + "methods": [ + "upgrade_notebook_runtime" + ] + } + } + }, + "rest": { + "libraryClient": "NotebookServiceClient", + "rpcs": { + "AssignNotebookRuntime": { + "methods": [ + "assign_notebook_runtime" + ] + }, + "CreateNotebookRuntimeTemplate": { + "methods": [ + "create_notebook_runtime_template" + ] + }, + "DeleteNotebookRuntime": { + "methods": [ + "delete_notebook_runtime" + ] + }, + "DeleteNotebookRuntimeTemplate": { + "methods": [ + "delete_notebook_runtime_template" + ] + }, + "GetNotebookRuntime": { + "methods": [ + "get_notebook_runtime" + ] + }, + "GetNotebookRuntimeTemplate": { + "methods": [ + "get_notebook_runtime_template" + ] + }, + "ListNotebookRuntimeTemplates": { + "methods": [ + "list_notebook_runtime_templates" + ] + }, + "ListNotebookRuntimes": { + "methods": [ + "list_notebook_runtimes" + ] + }, + "StartNotebookRuntime": { + "methods": [ + 
"start_notebook_runtime" + ] + }, + "UpgradeNotebookRuntime": { + "methods": [ + "upgrade_notebook_runtime" + ] + } + } + } + } + }, "PersistentResourceService": { "clients": { "grpc": { @@ -3173,6 +3519,11 @@ "list_persistent_resources" ] }, + "RebootPersistentResource": { + "methods": [ + "reboot_persistent_resource" + ] + }, "UpdatePersistentResource": { "methods": [ "update_persistent_resource" @@ -3203,6 +3554,11 @@ "list_persistent_resources" ] }, + "RebootPersistentResource": { + "methods": [ + "reboot_persistent_resource" + ] + }, "UpdatePersistentResource": { "methods": [ "update_persistent_resource" @@ -3233,6 +3589,11 @@ "list_persistent_resources" ] }, + "RebootPersistentResource": { + "methods": [ + "reboot_persistent_resource" + ] + }, "UpdatePersistentResource": { "methods": [ "update_persistent_resource" @@ -3446,6 +3807,11 @@ "grpc": { "libraryClient": "PredictionServiceClient", "rpcs": { + "ChatCompletions": { + "methods": [ + "chat_completions" + ] + }, "CountTokens": { "methods": [ "count_tokens" @@ -3516,6 +3882,11 @@ "grpc-async": { "libraryClient": "PredictionServiceAsyncClient", "rpcs": { + "ChatCompletions": { + "methods": [ + "chat_completions" + ] + }, "CountTokens": { "methods": [ "count_tokens" @@ -3586,6 +3957,11 @@ "rest": { "libraryClient": "PredictionServiceClient", "rpcs": { + "ChatCompletions": { + "methods": [ + "chat_completions" + ] + }, "CountTokens": { "methods": [ "count_tokens" @@ -4455,6 +4831,194 @@ } } }, + "VertexRagDataService": { + "clients": { + "grpc": { + "libraryClient": "VertexRagDataServiceClient", + "rpcs": { + "CreateRagCorpus": { + "methods": [ + "create_rag_corpus" + ] + }, + "DeleteRagCorpus": { + "methods": [ + "delete_rag_corpus" + ] + }, + "DeleteRagFile": { + "methods": [ + "delete_rag_file" + ] + }, + "GetRagCorpus": { + "methods": [ + "get_rag_corpus" + ] + }, + "GetRagFile": { + "methods": [ + "get_rag_file" + ] + }, + "ImportRagFiles": { + "methods": [ + "import_rag_files" + ] + }, + 
"ListRagCorpora": { + "methods": [ + "list_rag_corpora" + ] + }, + "ListRagFiles": { + "methods": [ + "list_rag_files" + ] + }, + "UploadRagFile": { + "methods": [ + "upload_rag_file" + ] + } + } + }, + "grpc-async": { + "libraryClient": "VertexRagDataServiceAsyncClient", + "rpcs": { + "CreateRagCorpus": { + "methods": [ + "create_rag_corpus" + ] + }, + "DeleteRagCorpus": { + "methods": [ + "delete_rag_corpus" + ] + }, + "DeleteRagFile": { + "methods": [ + "delete_rag_file" + ] + }, + "GetRagCorpus": { + "methods": [ + "get_rag_corpus" + ] + }, + "GetRagFile": { + "methods": [ + "get_rag_file" + ] + }, + "ImportRagFiles": { + "methods": [ + "import_rag_files" + ] + }, + "ListRagCorpora": { + "methods": [ + "list_rag_corpora" + ] + }, + "ListRagFiles": { + "methods": [ + "list_rag_files" + ] + }, + "UploadRagFile": { + "methods": [ + "upload_rag_file" + ] + } + } + }, + "rest": { + "libraryClient": "VertexRagDataServiceClient", + "rpcs": { + "CreateRagCorpus": { + "methods": [ + "create_rag_corpus" + ] + }, + "DeleteRagCorpus": { + "methods": [ + "delete_rag_corpus" + ] + }, + "DeleteRagFile": { + "methods": [ + "delete_rag_file" + ] + }, + "GetRagCorpus": { + "methods": [ + "get_rag_corpus" + ] + }, + "GetRagFile": { + "methods": [ + "get_rag_file" + ] + }, + "ImportRagFiles": { + "methods": [ + "import_rag_files" + ] + }, + "ListRagCorpora": { + "methods": [ + "list_rag_corpora" + ] + }, + "ListRagFiles": { + "methods": [ + "list_rag_files" + ] + }, + "UploadRagFile": { + "methods": [ + "upload_rag_file" + ] + } + } + } + } + }, + "VertexRagService": { + "clients": { + "grpc": { + "libraryClient": "VertexRagServiceClient", + "rpcs": { + "RetrieveContexts": { + "methods": [ + "retrieve_contexts" + ] + } + } + }, + "grpc-async": { + "libraryClient": "VertexRagServiceAsyncClient", + "rpcs": { + "RetrieveContexts": { + "methods": [ + "retrieve_contexts" + ] + } + } + }, + "rest": { + "libraryClient": "VertexRagServiceClient", + "rpcs": { + "RetrieveContexts": { + 
"methods": [ + "retrieve_contexts" + ] + } + } + } + } + }, "VizierService": { "clients": { "grpc": { diff --git a/google/cloud/aiplatform_v1beta1/gapic_version.py b/google/cloud/aiplatform_v1beta1/gapic_version.py index 7cde24c535..45df914735 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.46.0" # {x-release-please-version} +__version__ = "1.47.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py index e74adca50d..157eeb1488 100644 --- a/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/dataset_service/transports/rest.py @@ -1093,6 +1093,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1423,6 +1427,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1801,6 +1809,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2187,6 +2199,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2431,10 +2447,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2565,6 +2577,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5263,6 +5279,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5650,6 +5670,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -6088,6 +6112,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -6535,6 +6563,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -6779,10 +6811,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -6974,6 +7002,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py index 814fb65d2c..13d673bd97 100644 --- a/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/deployment_resource_pool_service/transports/rest.py @@ -710,6 +710,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1040,6 +1044,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1418,6 +1426,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1804,6 +1816,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2048,10 +2064,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2182,6 +2194,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3647,6 +3663,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4034,6 +4054,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4472,6 +4496,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4919,6 +4947,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5163,10 +5195,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -5358,6 +5386,10 @@ def 
__call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest.py index 9ec52a104d..20dc716a71 100644 --- a/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/endpoint_service/transports/rest.py @@ -782,6 +782,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1112,6 +1116,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1490,6 +1498,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1876,6 +1888,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": 
"get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2120,10 +2136,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2254,6 +2266,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3979,6 +3995,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4366,6 +4386,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4804,6 +4828,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": 
"get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5251,6 +5279,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5495,10 +5527,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -5690,6 +5718,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/__init__.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/__init__.py new file mode 100644 index 0000000000..b09b156cce --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import EvaluationServiceClient +from .async_client import EvaluationServiceAsyncClient + +__all__ = ( + "EvaluationServiceClient", + "EvaluationServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/async_client.py new file mode 100644 index 0000000000..ef7a8abcc1 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/async_client.py @@ -0,0 +1,1070 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.cloud.aiplatform_v1beta1.types import evaluation_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .transports.base import EvaluationServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import EvaluationServiceGrpcAsyncIOTransport +from .client import EvaluationServiceClient + + +class EvaluationServiceAsyncClient: + """Vertex AI Online Evaluation Service.""" + + _client: EvaluationServiceClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
+ DEFAULT_ENDPOINT = EvaluationServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = EvaluationServiceClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = EvaluationServiceClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = EvaluationServiceClient._DEFAULT_UNIVERSE + + common_billing_account_path = staticmethod( + EvaluationServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + EvaluationServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(EvaluationServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + EvaluationServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + EvaluationServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + EvaluationServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(EvaluationServiceClient.common_project_path) + parse_common_project_path = staticmethod( + EvaluationServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(EvaluationServiceClient.common_location_path) + parse_common_location_path = staticmethod( + EvaluationServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EvaluationServiceAsyncClient: The constructed client. + """ + return EvaluationServiceClient.from_service_account_info.__func__(EvaluationServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. 
+ + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EvaluationServiceAsyncClient: The constructed client. + """ + return EvaluationServiceClient.from_service_account_file.__func__(EvaluationServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. 
+ """ + return EvaluationServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> EvaluationServiceTransport: + """Returns the transport used by the client instance. + + Returns: + EvaluationServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + + get_transport_class = functools.partial( + type(EvaluationServiceClient).get_transport_class, type(EvaluationServiceClient) + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, EvaluationServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the evaluation service async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.EvaluationServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. 
The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client = EvaluationServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def evaluate_instances( + self, + request: Optional[ + Union[evaluation_service.EvaluateInstancesRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> evaluation_service.EvaluateInstancesResponse: + r"""Evaluates instances based on a given metric. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_evaluate_instances(): + # Create a client + client = aiplatform_v1beta1.EvaluationServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.EvaluateInstancesRequest( + location="location_value", + ) + + # Make the request + response = await client.evaluate_instances(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.EvaluateInstancesRequest, dict]]): + The request object. Request message for + EvaluationService.EvaluateInstances. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.EvaluateInstancesResponse: + Response message for + EvaluationService.EvaluateInstances. + + """ + # Create or coerce a protobuf request object. + request = evaluation_service.EvaluateInstancesRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.evaluate_instances, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("location", request.location),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. 
+ # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "EvaluationServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("EvaluationServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/client.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/client.py new file mode 100644 index 0000000000..deeec19140 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/client.py @@ -0,0 +1,1471 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +from google.cloud.aiplatform_v1beta1.types import evaluation_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .transports.base import EvaluationServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import EvaluationServiceGrpcTransport +from .transports.grpc_asyncio import EvaluationServiceGrpcAsyncIOTransport +from .transports.rest import EvaluationServiceRestTransport + + +class EvaluationServiceClientMeta(type): + """Metaclass for the EvaluationService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[EvaluationServiceTransport]] + _transport_registry["grpc"] = EvaluationServiceGrpcTransport + _transport_registry["grpc_asyncio"] = EvaluationServiceGrpcAsyncIOTransport + _transport_registry["rest"] = EvaluationServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[EvaluationServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class EvaluationServiceClient(metaclass=EvaluationServiceClientMeta): + """Vertex AI Online Evaluation Service.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EvaluationServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + EvaluationServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> EvaluationServiceTransport: + """Returns the transport used by the client instance. + + Returns: + EvaluationServiceTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path( + billing_account: str, + ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path( + folder: str, + ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format( + folder=folder, + ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path( + organization: str, + ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format( + organization=organization, + ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path( + project: str, + ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format( + project=project, + ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path( + project: str, + location: str, + ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, + location=location, + 
) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[client_options_lib.ClientOptions] = None + ): + """Deprecated. Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. 
Use the api_endpoint property instead.", + DeprecationWarning, + ) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. 
+ """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". 
+ + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = EvaluationServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = EvaluationServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = EvaluationServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = EvaluationServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. 
+ + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = EvaluationServiceClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or EvaluationServiceClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, EvaluationServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the evaluation service client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, EvaluationServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. 
+ + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = EvaluationServiceClient._read_environment_variables() + self._client_cert_source = EvaluationServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = EvaluationServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, EvaluationServiceTransport) + if transport_provided: + # transport is a EvaluationServiceTransport instance. 
+ if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = cast(EvaluationServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or EvaluationServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(cast(str, transport)) + self._transport = Transport( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + def evaluate_instances( + self, + request: Optional[ + Union[evaluation_service.EvaluateInstancesRequest, dict] + ] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> evaluation_service.EvaluateInstancesResponse: + r"""Evaluates instances based on a given metric. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_evaluate_instances(): + # Create a client + client = aiplatform_v1beta1.EvaluationServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.EvaluateInstancesRequest( + location="location_value", + ) + + # Make the request + response = client.evaluate_instances(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.EvaluateInstancesRequest, dict]): + The request object. Request message for + EvaluationService.EvaluateInstances. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.EvaluateInstancesResponse: + Response message for + EvaluationService.EvaluateInstances. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a evaluation_service.EvaluateInstancesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, evaluation_service.EvaluateInstancesRequest): + request = evaluation_service.EvaluateInstancesRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.evaluate_instances] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("location", request.location),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "EvaluationServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("EvaluationServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/__init__.py new file mode 100644 index 0000000000..60197b61ce --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import EvaluationServiceTransport +from .grpc import EvaluationServiceGrpcTransport +from .grpc_asyncio import EvaluationServiceGrpcAsyncIOTransport +from .rest import EvaluationServiceRestTransport +from .rest import EvaluationServiceRestInterceptor + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[EvaluationServiceTransport]] +_transport_registry["grpc"] = EvaluationServiceGrpcTransport +_transport_registry["grpc_asyncio"] = EvaluationServiceGrpcAsyncIOTransport +_transport_registry["rest"] = EvaluationServiceRestTransport + +__all__ = ( + "EvaluationServiceTransport", + "EvaluationServiceGrpcTransport", + "EvaluationServiceGrpcAsyncIOTransport", + "EvaluationServiceRestTransport", + "EvaluationServiceRestInterceptor", +) diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/base.py new file mode 100644 index 0000000000..f29ab695a8 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/base.py @@ -0,0 +1,260 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import evaluation_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class EvaluationServiceTransport(abc.ABC): + """Abstract transport class for EvaluationService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. 
+ self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.evaluate_instances: gapic_v1.method.wrap_method( + self.evaluate_instances, + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def evaluate_instances( + self, + ) -> Callable[ + [evaluation_service.EvaluateInstancesRequest], + Union[ + evaluation_service.EvaluateInstancesResponse, + Awaitable[evaluation_service.EvaluateInstancesResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + 
[iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("EvaluationServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc.py new file mode 100644 index 0000000000..3095f9477d --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc.py @@ -0,0 +1,476 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import evaluation_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import EvaluationServiceTransport, DEFAULT_CLIENT_INFO + + +class EvaluationServiceGrpcTransport(EvaluationServiceTransport): + """gRPC backend transport for EvaluationService. + + Vertex AI Online Evaluation Service. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def evaluate_instances( + self, + ) -> Callable[ + [evaluation_service.EvaluateInstancesRequest], + evaluation_service.EvaluateInstancesResponse, + ]: + r"""Return a callable for the evaluate instances method over gRPC. + + Evaluates instances based on a given metric. + + Returns: + Callable[[~.EvaluateInstancesRequest], + ~.EvaluateInstancesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "evaluate_instances" not in self._stubs: + self._stubs["evaluate_instances"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.EvaluationService/EvaluateInstances", + request_serializer=evaluation_service.EvaluateInstancesRequest.serialize, + response_deserializer=evaluation_service.EvaluateInstancesResponse.deserialize, + ) + return self._stubs["evaluate_instances"] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("EvaluationServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..c3b57a7bfb --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/grpc_asyncio.py @@ -0,0 +1,475 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import evaluation_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import EvaluationServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import EvaluationServiceGrpcTransport + + +class EvaluationServiceGrpcAsyncIOTransport(EvaluationServiceTransport): + """gRPC AsyncIO backend transport for EvaluationService. + + Vertex AI Online Evaluation Service. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def evaluate_instances( + self, + ) -> Callable[ + [evaluation_service.EvaluateInstancesRequest], + Awaitable[evaluation_service.EvaluateInstancesResponse], + ]: + r"""Return a callable for the evaluate instances method over gRPC. + + Evaluates instances based on a given metric. 
+ + Returns: + Callable[[~.EvaluateInstancesRequest], + Awaitable[~.EvaluateInstancesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "evaluate_instances" not in self._stubs: + self._stubs["evaluate_instances"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.EvaluationService/EvaluateInstances", + request_serializer=evaluation_service.EvaluateInstancesRequest.serialize, + response_deserializer=evaluation_service.EvaluateInstancesResponse.deserialize, + ) + return self._stubs["evaluate_instances"] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("EvaluationServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/rest.py new file mode 100644 index 0000000000..f52010d78a --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/evaluation_service/transports/rest.py @@ -0,0 +1,3227 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import evaluation_service +from google.longrunning import operations_pb2 # type: ignore + +from .base import ( + EvaluationServiceTransport, + DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, +) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class EvaluationServiceRestInterceptor: + """Interceptor for EvaluationService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. 
+ Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the EvaluationServiceRestTransport. + + .. code-block:: python + class MyCustomEvaluationServiceInterceptor(EvaluationServiceRestInterceptor): + def pre_evaluate_instances(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_evaluate_instances(self, response): + logging.log(f"Received response: {response}") + return response + + transport = EvaluationServiceRestTransport(interceptor=MyCustomEvaluationServiceInterceptor()) + client = EvaluationServiceClient(transport=transport) + + + """ + + def pre_evaluate_instances( + self, + request: evaluation_service.EvaluateInstancesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[evaluation_service.EvaluateInstancesRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for evaluate_instances + + Override in a subclass to manipulate the request or metadata + before they are sent to the EvaluationService server. + """ + return request, metadata + + def post_evaluate_instances( + self, response: evaluation_service.EvaluateInstancesResponse + ) -> evaluation_service.EvaluateInstancesResponse: + """Post-rpc interceptor for evaluate_instances + + Override in a subclass to manipulate the response + after it is returned by the EvaluationService server but before + it is returned to user code. + """ + return response + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the EvaluationService server. 
+ """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the EvaluationService server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the EvaluationService server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the EvaluationService server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the EvaluationService server. + """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the EvaluationService server but before + it is returned to user code. 
+ """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the EvaluationService server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the EvaluationService server but before + it is returned to user code. + """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the EvaluationService server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the EvaluationService server but before + it is returned to user code. + """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the EvaluationService server. 
+ """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the EvaluationService server but before + it is returned to user code. + """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the EvaluationService server. + """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the EvaluationService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the EvaluationService server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the EvaluationService server but before + it is returned to user code. 
+ """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the EvaluationService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the EvaluationService server but before + it is returned to user code. + """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.WaitOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the EvaluationService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the EvaluationService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class EvaluationServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: EvaluationServiceRestInterceptor + + +class EvaluationServiceRestTransport(EvaluationServiceTransport): + """REST backend transport for EvaluationService. + + Vertex AI Online Evaluation Service. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via an issue in this + library's source repository. Thank you! + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[EvaluationServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via a GitHub issue in + this library's repository. Thank you! + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you are developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+            url_scheme: the protocol scheme for the API endpoint. Normally
+                "https", but for testing or local servers,
+                "http" can be specified.
+        """
+        # Run the base constructor
+        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or EvaluationServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    class _EvaluateInstances(EvaluationServiceRestStub):
+        def __hash__(self):
+            return hash("EvaluateInstances")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        def __call__(
+            self,
+
request: evaluation_service.EvaluateInstancesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> evaluation_service.EvaluateInstancesResponse: + r"""Call the evaluate instances method over HTTP. + + Args: + request (~.evaluation_service.EvaluateInstancesRequest): + The request object. Request message for + EvaluationService.EvaluateInstances. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.evaluation_service.EvaluateInstancesResponse: + Response message for + EvaluationService.EvaluateInstances. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{location=projects/*/locations/*}:evaluateInstances", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_evaluate_instances( + request, metadata + ) + pb_request = evaluation_service.EvaluateInstancesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case 
of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = evaluation_service.EvaluateInstancesResponse() + pb_resp = evaluation_service.EvaluateInstancesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_evaluate_instances(resp) + return resp + + @property + def evaluate_instances( + self, + ) -> Callable[ + [evaluation_service.EvaluateInstancesRequest], + evaluation_service.EvaluateInstancesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._EvaluateInstances(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation(EvaluationServiceRestStub): + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_location(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.Location() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_location(resp) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations(EvaluationServiceRestStub): + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + + r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*}/locations", + }, + ] + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_locations(resp) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy(EvaluationServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the get iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + 
"method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + ] + + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy(EvaluationServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the set iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": 
"/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + ] + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions(EvaluationServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + ] + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, 
**request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(EvaluationServiceRestStub): + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:cancel", + }, 
+ { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + 
"uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation(EvaluationServiceRestStub): + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + 
}, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(EvaluationServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { 
+ "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(EvaluationServiceRestStub): + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/exampleStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation(EvaluationServiceRestStub): + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the wait operation method over HTTP. 
+ + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": 
"post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + 
headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_wait_operation(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("EvaluationServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/__init__.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/__init__.py new file mode 100644 index 0000000000..fee41604a1 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import ExtensionExecutionServiceClient +from .async_client import ExtensionExecutionServiceAsyncClient + +__all__ = ( + "ExtensionExecutionServiceClient", + "ExtensionExecutionServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/async_client.py new file mode 100644 index 0000000000..c960dd6299 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/async_client.py @@ -0,0 +1,1249 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.cloud.aiplatform_v1beta1.types import content +from google.cloud.aiplatform_v1beta1.types import extension_execution_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .transports.base import ExtensionExecutionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ExtensionExecutionServiceGrpcAsyncIOTransport +from .client import ExtensionExecutionServiceClient + + +class ExtensionExecutionServiceAsyncClient: + """A service for Extension execution.""" + + _client: ExtensionExecutionServiceClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
+ DEFAULT_ENDPOINT = ExtensionExecutionServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ExtensionExecutionServiceClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = ( + ExtensionExecutionServiceClient._DEFAULT_ENDPOINT_TEMPLATE + ) + _DEFAULT_UNIVERSE = ExtensionExecutionServiceClient._DEFAULT_UNIVERSE + + extension_path = staticmethod(ExtensionExecutionServiceClient.extension_path) + parse_extension_path = staticmethod( + ExtensionExecutionServiceClient.parse_extension_path + ) + secret_version_path = staticmethod( + ExtensionExecutionServiceClient.secret_version_path + ) + parse_secret_version_path = staticmethod( + ExtensionExecutionServiceClient.parse_secret_version_path + ) + common_billing_account_path = staticmethod( + ExtensionExecutionServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + ExtensionExecutionServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod( + ExtensionExecutionServiceClient.common_folder_path + ) + parse_common_folder_path = staticmethod( + ExtensionExecutionServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + ExtensionExecutionServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + ExtensionExecutionServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod( + ExtensionExecutionServiceClient.common_project_path + ) + parse_common_project_path = staticmethod( + ExtensionExecutionServiceClient.parse_common_project_path + ) + common_location_path = staticmethod( + ExtensionExecutionServiceClient.common_location_path + ) + parse_common_location_path = staticmethod( + ExtensionExecutionServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. 
+ + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ExtensionExecutionServiceAsyncClient: The constructed client. + """ + return ExtensionExecutionServiceClient.from_service_account_info.__func__(ExtensionExecutionServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ExtensionExecutionServiceAsyncClient: The constructed client. + """ + return ExtensionExecutionServiceClient.from_service_account_file.__func__(ExtensionExecutionServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return ExtensionExecutionServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> ExtensionExecutionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ExtensionExecutionServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. 
+ """ + return self._client._universe_domain + + get_transport_class = functools.partial( + type(ExtensionExecutionServiceClient).get_transport_class, + type(ExtensionExecutionServiceClient), + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ExtensionExecutionServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the extension execution service async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ExtensionExecutionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. 
If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ExtensionExecutionServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def execute_extension( + self, + request: Optional[ + Union[extension_execution_service.ExecuteExtensionRequest, dict] + ] = None, + *, + name: Optional[str] = None, + operation_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> extension_execution_service.ExecuteExtensionResponse: + r"""Executes the request against a given extension. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_execute_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionExecutionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExecuteExtensionRequest( + name="name_value", + operation_id="operation_id_value", + ) + + # Make the request + response = await client.execute_extension(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.ExecuteExtensionRequest, dict]]): + The request object. Request message for + [ExtensionExecutionService.ExecuteExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.ExecuteExtension]. + name (:class:`str`): + Required. Name (identifier) of the extension; Format: + ``projects/{project}/locations/{location}/extensions/{extension}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Required. The desired ID of the operation to be executed + in this extension as defined in + [ExtensionOperation.operation_id][google.cloud.aiplatform.v1beta1.ExtensionOperation.operation_id]. + + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.aiplatform_v1beta1.types.ExecuteExtensionResponse: + Response message for + [ExtensionExecutionService.ExecuteExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.ExecuteExtension]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, operation_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = extension_execution_service.ExecuteExtensionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if operation_id is not None: + request.operation_id = operation_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.execute_extension, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def query_extension( + self, + request: Optional[ + Union[extension_execution_service.QueryExtensionRequest, dict] + ] = None, + *, + name: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> extension_execution_service.QueryExtensionResponse: + r"""Queries an extension with a default controller. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_query_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionExecutionServiceAsyncClient() + + # Initialize request argument(s) + contents = aiplatform_v1beta1.Content() + contents.parts.text = "text_value" + + request = aiplatform_v1beta1.QueryExtensionRequest( + name="name_value", + contents=contents, + ) + + # Make the request + response = await client.query_extension(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.QueryExtensionRequest, dict]]): + The request object. Request message for + [ExtensionExecutionService.QueryExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.QueryExtension]. + name (:class:`str`): + Required. 
Name (identifier) of the extension; Format: + ``projects/{project}/locations/{location}/extensions/{extension}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + contents (:class:`MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]`): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.QueryExtensionResponse: + Response message for + [ExtensionExecutionService.QueryExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.QueryExtension]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = extension_execution_service.QueryExtensionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if contents: + request.contents.extend(contents) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.query_extension, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+
+        **JSON Example**
+
+        ::
+
+            {
+              "bindings": [
+                {
+                  "role": "roles/resourcemanager.organizationAdmin",
+                  "members": [
+                    "user:mike@example.com",
+                    "group:admins@example.com",
+                    "domain:google.com",
+                    "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                  ]
+                },
+                {
+                  "role": "roles/resourcemanager.organizationViewer",
+                  "members": ["user:eve@example.com"],
+                  "condition": {
+                    "title": "expirable access",
+                    "description": "Does not grant access after Sep 2020",
+                    "expression": "request.time <
+                    timestamp('2020-10-01T00:00:00.000Z')",
+                  }
+                }
+              ]
+            }
+
+        **YAML Example**
+
+        ::
+
+            bindings:
+            - members:
+              - user:mike@example.com
+              - group:admins@example.com
+              - domain:google.com
+              - serviceAccount:my-project-id@appspot.gserviceaccount.com
+              role: roles/resourcemanager.organizationAdmin
+            - members:
+              - user:eve@example.com
+              role: roles/resourcemanager.organizationViewer
+              condition:
+                title: expirable access
+                description: Does not grant access after Sep 2020
+                expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+        For a description of IAM and its features, see the `IAM
+        developer's
+        guide <https://cloud.google.com/iam/docs>`__.
+        """
+        # Create or coerce a protobuf request object.
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.SetIamPolicyRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.set_iam_policy,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+        )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+
+        **JSON Example**
+
+        ::
+
+            {
+              "bindings": [
+                {
+                  "role": "roles/resourcemanager.organizationAdmin",
+                  "members": [
+                    "user:mike@example.com",
+                    "group:admins@example.com",
+                    "domain:google.com",
+                    "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                  ]
+                },
+                {
+                  "role": "roles/resourcemanager.organizationViewer",
+                  "members": ["user:eve@example.com"],
+                  "condition": {
+                    "title": "expirable access",
+                    "description": "Does not grant access after Sep 2020",
+                    "expression": "request.time <
+                    timestamp('2020-10-01T00:00:00.000Z')",
+                  }
+                }
+              ]
+            }
+
+        **YAML Example**
+
+        ::
+
+            bindings:
+            - members:
+              - user:mike@example.com
+              - group:admins@example.com
+              - domain:google.com
+              - serviceAccount:my-project-id@appspot.gserviceaccount.com
+              role: roles/resourcemanager.organizationAdmin
+            - members:
+              - user:eve@example.com
+              role: roles/resourcemanager.organizationViewer
+              condition:
+                title: expirable access
+                description: Does not grant access after Sep 2020
+                expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+        For a description of IAM and its features, see the `IAM
+        developer's
+        guide <https://cloud.google.com/iam/docs>`__.
+        """
+        # Create or coerce a protobuf request object.
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.GetIamPolicyRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_iam_policy,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+        )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def __aenter__(self) -> "ExtensionExecutionServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("ExtensionExecutionServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/client.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/client.py new file mode 100644 index 0000000000..8dbb33099e --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/client.py @@ -0,0 +1,1678 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +from google.cloud.aiplatform_v1beta1.types import content +from google.cloud.aiplatform_v1beta1.types import extension_execution_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .transports.base import ExtensionExecutionServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import ExtensionExecutionServiceGrpcTransport +from .transports.grpc_asyncio import ExtensionExecutionServiceGrpcAsyncIOTransport +from .transports.rest import ExtensionExecutionServiceRestTransport + + +class ExtensionExecutionServiceClientMeta(type): + """Metaclass for the ExtensionExecutionService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+    """
+
+    _transport_registry = (
+        OrderedDict()
+    )  # type: Dict[str, Type[ExtensionExecutionServiceTransport]]
+    _transport_registry["grpc"] = ExtensionExecutionServiceGrpcTransport
+    _transport_registry["grpc_asyncio"] = ExtensionExecutionServiceGrpcAsyncIOTransport
+    _transport_registry["rest"] = ExtensionExecutionServiceRestTransport
+
+    def get_transport_class(
+        cls,
+        label: Optional[str] = None,
+    ) -> Type[ExtensionExecutionServiceTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class ExtensionExecutionServiceClient(metaclass=ExtensionExecutionServiceClientMeta):
+    """A service for Extension execution."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    # Note: DEFAULT_ENDPOINT is deprecated.
Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ExtensionExecutionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ExtensionExecutionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ExtensionExecutionServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ExtensionExecutionServiceTransport: The transport used by the client + instance. 
+        """
+        return self._transport
+
+    @staticmethod
+    def extension_path(
+        project: str,
+        location: str,
+        extension: str,
+    ) -> str:
+        """Returns a fully-qualified extension string."""
+        return "projects/{project}/locations/{location}/extensions/{extension}".format(
+            project=project,
+            location=location,
+            extension=extension,
+        )
+
+    @staticmethod
+    def parse_extension_path(path: str) -> Dict[str, str]:
+        """Parses a extension path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/extensions/(?P<extension>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def secret_version_path(
+        project: str,
+        secret: str,
+        secret_version: str,
+    ) -> str:
+        """Returns a fully-qualified secret_version string."""
+        return "projects/{project}/secrets/{secret}/versions/{secret_version}".format(
+            project=project,
+            secret=secret,
+            secret_version=secret_version,
+        )
+
+    @staticmethod
+    def parse_secret_version_path(path: str) -> Dict[str, str]:
+        """Parses a secret_version path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/secrets/(?P<secret>.+?)/versions/(?P<secret_version>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = 
re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[client_options_lib.ClientOptions] = None
+    ):
+        """Deprecated. Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+ + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. + """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. 
+ """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = ExtensionExecutionServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = ExtensionExecutionServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = ( + ExtensionExecutionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. 
+ + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = ExtensionExecutionServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. + + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = ExtensionExecutionServiceClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. 
+ """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or ExtensionExecutionServiceClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, ExtensionExecutionServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the extension execution service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ExtensionExecutionServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. 
Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = ExtensionExecutionServiceClient._read_environment_variables() + self._client_cert_source = ( + ExtensionExecutionServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + ) + self._universe_domain = ExtensionExecutionServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, ExtensionExecutionServiceTransport) + if transport_provided: + # transport is a ExtensionExecutionServiceTransport instance. + if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = cast(ExtensionExecutionServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or ExtensionExecutionServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(cast(str, transport)) + self._transport = Transport( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + def execute_extension( + self, + request: Optional[ + Union[extension_execution_service.ExecuteExtensionRequest, dict] + ] = None, + *, + name: Optional[str] = None, + operation_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> extension_execution_service.ExecuteExtensionResponse: + r"""Executes the request against a given extension. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_execute_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionExecutionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExecuteExtensionRequest( + name="name_value", + operation_id="operation_id_value", + ) + + # Make the request + response = client.execute_extension(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ExecuteExtensionRequest, dict]): + The request object. Request message for + [ExtensionExecutionService.ExecuteExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.ExecuteExtension]. + name (str): + Required. Name (identifier) of the extension; Format: + ``projects/{project}/locations/{location}/extensions/{extension}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (str): + Required. The desired ID of the operation to be executed + in this extension as defined in + [ExtensionOperation.operation_id][google.cloud.aiplatform.v1beta1.ExtensionOperation.operation_id]. + + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.ExecuteExtensionResponse: + Response message for + [ExtensionExecutionService.ExecuteExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.ExecuteExtension]. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, operation_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a extension_execution_service.ExecuteExtensionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, extension_execution_service.ExecuteExtensionRequest): + request = extension_execution_service.ExecuteExtensionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if operation_id is not None: + request.operation_id = operation_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.execute_extension] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def query_extension( + self, + request: Optional[ + Union[extension_execution_service.QueryExtensionRequest, dict] + ] = None, + *, + name: Optional[str] = None, + contents: Optional[MutableSequence[content.Content]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> extension_execution_service.QueryExtensionResponse: + r"""Queries an extension with a default controller. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_query_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionExecutionServiceClient() + + # Initialize request argument(s) + contents = aiplatform_v1beta1.Content() + contents.parts.text = "text_value" + + request = aiplatform_v1beta1.QueryExtensionRequest( + name="name_value", + contents=contents, + ) + + # Make the request + response = client.query_extension(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.QueryExtensionRequest, dict]): + The request object. Request message for + [ExtensionExecutionService.QueryExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.QueryExtension]. + name (str): + Required. Name (identifier) of the extension; Format: + ``projects/{project}/locations/{location}/extensions/{extension}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ contents (MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a + single instance. For multi-turn queries, + this is a repeated field that contains + conversation history + latest request. + + This corresponds to the ``contents`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.QueryExtensionResponse: + Response message for + [ExtensionExecutionService.QueryExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.QueryExtension]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, contents]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a extension_execution_service.QueryExtensionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, extension_execution_service.QueryExtensionRequest): + request = extension_execution_service.QueryExtensionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if contents is not None: + request.contents = contents + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.query_extension] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "ExtensionExecutionServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("ExtensionExecutionServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/__init__.py new file mode 100644 index 0000000000..6d0191a0d3 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/__init__.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ExtensionExecutionServiceTransport +from .grpc import ExtensionExecutionServiceGrpcTransport +from .grpc_asyncio import ExtensionExecutionServiceGrpcAsyncIOTransport +from .rest import ExtensionExecutionServiceRestTransport +from .rest import ExtensionExecutionServiceRestInterceptor + + +# Compile a registry of transports. 
+_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[ExtensionExecutionServiceTransport]] +_transport_registry["grpc"] = ExtensionExecutionServiceGrpcTransport +_transport_registry["grpc_asyncio"] = ExtensionExecutionServiceGrpcAsyncIOTransport +_transport_registry["rest"] = ExtensionExecutionServiceRestTransport + +__all__ = ( + "ExtensionExecutionServiceTransport", + "ExtensionExecutionServiceGrpcTransport", + "ExtensionExecutionServiceGrpcAsyncIOTransport", + "ExtensionExecutionServiceRestTransport", + "ExtensionExecutionServiceRestInterceptor", +) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/base.py new file mode 100644 index 0000000000..964e8091a9 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/base.py @@ -0,0 +1,277 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import extension_execution_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class ExtensionExecutionServiceTransport(abc.ABC): + """Abstract transport class for ExtensionExecutionService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.execute_extension: gapic_v1.method.wrap_method( + self.execute_extension, + default_timeout=None, + client_info=client_info, + ), + self.query_extension: gapic_v1.method.wrap_method( + self.query_extension, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def execute_extension( + self, + ) -> Callable[ + [extension_execution_service.ExecuteExtensionRequest], + Union[ + extension_execution_service.ExecuteExtensionResponse, + Awaitable[extension_execution_service.ExecuteExtensionResponse], + ], + ]: + raise NotImplementedError() + + @property + def query_extension( + self, + ) -> Callable[ + [extension_execution_service.QueryExtensionRequest], + Union[ + extension_execution_service.QueryExtensionResponse, + Awaitable[extension_execution_service.QueryExtensionResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) 
-> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("ExtensionExecutionServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc.py new file mode 100644 index 0000000000..a23531338b --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc.py @@ -0,0 +1,505 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import extension_execution_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import ExtensionExecutionServiceTransport, DEFAULT_CLIENT_INFO + + +class ExtensionExecutionServiceGrpcTransport(ExtensionExecutionServiceTransport): + """gRPC backend transport for ExtensionExecutionService. + + A service for Extension execution. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def execute_extension( + self, + ) -> Callable[ + [extension_execution_service.ExecuteExtensionRequest], + extension_execution_service.ExecuteExtensionResponse, + ]: + r"""Return a callable for the execute extension method over gRPC. + + Executes the request against a given extension. + + Returns: + Callable[[~.ExecuteExtensionRequest], + ~.ExecuteExtensionResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "execute_extension" not in self._stubs: + self._stubs["execute_extension"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionExecutionService/ExecuteExtension", + request_serializer=extension_execution_service.ExecuteExtensionRequest.serialize, + response_deserializer=extension_execution_service.ExecuteExtensionResponse.deserialize, + ) + return self._stubs["execute_extension"] + + @property + def query_extension( + self, + ) -> Callable[ + [extension_execution_service.QueryExtensionRequest], + extension_execution_service.QueryExtensionResponse, + ]: + r"""Return a callable for the query extension method over gRPC. + + Queries an extension with a default controller. + + Returns: + Callable[[~.QueryExtensionRequest], + ~.QueryExtensionResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "query_extension" not in self._stubs: + self._stubs["query_extension"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionExecutionService/QueryExtension", + request_serializer=extension_execution_service.QueryExtensionRequest.serialize, + response_deserializer=extension_execution_service.QueryExtensionResponse.deserialize, + ) + return self._stubs["query_extension"] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        # NOTE(review): guard must key on "wait_operation" — the generated
+        # code checked "delete_operation", so accessing delete_operation
+        # before wait_operation left this stub uncreated and the return
+        # below raised KeyError.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("ExtensionExecutionServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..15762bdd79 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/grpc_asyncio.py @@ -0,0 +1,504 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import extension_execution_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import ExtensionExecutionServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import ExtensionExecutionServiceGrpcTransport + + +class ExtensionExecutionServiceGrpcAsyncIOTransport(ExtensionExecutionServiceTransport): + """gRPC AsyncIO backend transport for ExtensionExecutionService. + + A service for Extension execution. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def execute_extension( + self, + ) -> Callable[ + [extension_execution_service.ExecuteExtensionRequest], + Awaitable[extension_execution_service.ExecuteExtensionResponse], + ]: + r"""Return a callable for the execute extension method over gRPC. 
+ + Executes the request against a given extension. + + Returns: + Callable[[~.ExecuteExtensionRequest], + Awaitable[~.ExecuteExtensionResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "execute_extension" not in self._stubs: + self._stubs["execute_extension"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionExecutionService/ExecuteExtension", + request_serializer=extension_execution_service.ExecuteExtensionRequest.serialize, + response_deserializer=extension_execution_service.ExecuteExtensionResponse.deserialize, + ) + return self._stubs["execute_extension"] + + @property + def query_extension( + self, + ) -> Callable[ + [extension_execution_service.QueryExtensionRequest], + Awaitable[extension_execution_service.QueryExtensionResponse], + ]: + r"""Return a callable for the query extension method over gRPC. + + Queries an extension with a default controller. + + Returns: + Callable[[~.QueryExtensionRequest], + Awaitable[~.QueryExtensionResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "query_extension" not in self._stubs: + self._stubs["query_extension"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionExecutionService/QueryExtension", + request_serializer=extension_execution_service.QueryExtensionRequest.serialize, + response_deserializer=extension_execution_service.QueryExtensionResponse.deserialize, + ) + return self._stubs["query_extension"] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("ExtensionExecutionServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/rest.py new file mode 100644 index 0000000000..74adc9a5b4 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_execution_service/transports/rest.py @@ -0,0 +1,3366 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import extension_execution_service +from google.longrunning import operations_pb2 # type: ignore + +from .base import ( + ExtensionExecutionServiceTransport, + DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, +) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class ExtensionExecutionServiceRestInterceptor: + """Interceptor for ExtensionExecutionService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. 
+ Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the ExtensionExecutionServiceRestTransport. + + .. code-block:: python + class MyCustomExtensionExecutionServiceInterceptor(ExtensionExecutionServiceRestInterceptor): + def pre_execute_extension(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_execute_extension(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_query_extension(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_query_extension(self, response): + logging.log(f"Received response: {response}") + return response + + transport = ExtensionExecutionServiceRestTransport(interceptor=MyCustomExtensionExecutionServiceInterceptor()) + client = ExtensionExecutionServiceClient(transport=transport) + + + """ + + def pre_execute_extension( + self, + request: extension_execution_service.ExecuteExtensionRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + extension_execution_service.ExecuteExtensionRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for execute_extension + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionExecutionService server. + """ + return request, metadata + + def post_execute_extension( + self, response: extension_execution_service.ExecuteExtensionResponse + ) -> extension_execution_service.ExecuteExtensionResponse: + """Post-rpc interceptor for execute_extension + + Override in a subclass to manipulate the response + after it is returned by the ExtensionExecutionService server but before + it is returned to user code. 
+ """ + return response + + def pre_query_extension( + self, + request: extension_execution_service.QueryExtensionRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + extension_execution_service.QueryExtensionRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for query_extension + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionExecutionService server. + """ + return request, metadata + + def post_query_extension( + self, response: extension_execution_service.QueryExtensionResponse + ) -> extension_execution_service.QueryExtensionResponse: + """Post-rpc interceptor for query_extension + + Override in a subclass to manipulate the response + after it is returned by the ExtensionExecutionService server but before + it is returned to user code. + """ + return response + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionExecutionService server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the ExtensionExecutionService server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionExecutionService server. 
+ """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the ExtensionExecutionService server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionExecutionService server. + """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the ExtensionExecutionService server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionExecutionService server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the ExtensionExecutionService server but before + it is returned to user code. 
+ """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionExecutionService server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the ExtensionExecutionService server but before + it is returned to user code. + """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionExecutionService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the ExtensionExecutionService server but before + it is returned to user code. + """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionExecutionService server. 
+ """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the ExtensionExecutionService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionExecutionService server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the ExtensionExecutionService server but before + it is returned to user code. + """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionExecutionService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the ExtensionExecutionService server but before + it is returned to user code. 
+ """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.WaitOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionExecutionService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the ExtensionExecutionService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class ExtensionExecutionServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: ExtensionExecutionServiceRestInterceptor + + +class ExtensionExecutionServiceRestTransport(ExtensionExecutionServiceTransport): + """REST backend transport for ExtensionExecutionService. + + A service for Extension execution. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via an issue in this + library's source repository. Thank you! 
+ """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[ExtensionExecutionServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via a GitHub issue in + this library's repository. Thank you! + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+                Generally, you only need to set this if you are developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+            url_scheme: the protocol scheme for the API endpoint. Normally
+                "https", but for testing or local servers,
+                "http" can be specified.
+        """
+        # Run the base constructor
+        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or ExtensionExecutionServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    class _ExecuteExtension(ExtensionExecutionServiceRestStub):
+        def __hash__(self):
+            return hash("ExecuteExtension")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        def __call__(
+            self,
+            request: extension_execution_service.ExecuteExtensionRequest,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Optional[float] = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+        ) -> extension_execution_service.ExecuteExtensionResponse:
+
r"""Call the execute extension method over HTTP. + + Args: + request (~.extension_execution_service.ExecuteExtensionRequest): + The request object. Request message for + [ExtensionExecutionService.ExecuteExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.ExecuteExtension]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.extension_execution_service.ExecuteExtensionResponse: + Response message for + [ExtensionExecutionService.ExecuteExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.ExecuteExtension]. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}:execute", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_execute_extension( + request, metadata + ) + pb_request = extension_execution_service.ExecuteExtensionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate 
core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = extension_execution_service.ExecuteExtensionResponse() + pb_resp = extension_execution_service.ExecuteExtensionResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_execute_extension(resp) + return resp + + class _QueryExtension(ExtensionExecutionServiceRestStub): + def __hash__(self): + return hash("QueryExtension") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: extension_execution_service.QueryExtensionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> extension_execution_service.QueryExtensionResponse: + r"""Call the query extension method over HTTP. + + Args: + request (~.extension_execution_service.QueryExtensionRequest): + The request object. Request message for + [ExtensionExecutionService.QueryExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.QueryExtension]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.extension_execution_service.QueryExtensionResponse: + Response message for + [ExtensionExecutionService.QueryExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.QueryExtension]. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}:query", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_query_extension(request, metadata) + pb_request = extension_execution_service.QueryExtensionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = extension_execution_service.QueryExtensionResponse() + pb_resp = extension_execution_service.QueryExtensionResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_query_extension(resp) + return resp + + @property + def execute_extension( + self, + ) -> Callable[ + [extension_execution_service.ExecuteExtensionRequest], + extension_execution_service.ExecuteExtensionResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ExecuteExtension(self._session, self._host, self._interceptor) # type: ignore + + @property + def query_extension( + self, + ) -> Callable[ + [extension_execution_service.QueryExtensionRequest], + extension_execution_service.QueryExtensionResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._QueryExtension(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation(ExtensionExecutionServiceRestStub): + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_location(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.Location() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_location(resp) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations(ExtensionExecutionServiceRestStub): + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + + r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*}/locations", + }, + ] + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_locations(resp) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy(ExtensionExecutionServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the get iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + 
"method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + ] + + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy(ExtensionExecutionServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the set iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": 
"/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + ] + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions(ExtensionExecutionServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + ] + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, 
**request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(ExtensionExecutionServiceRestStub): + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:cancel", + }, 
+ { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + 
"uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation(ExtensionExecutionServiceRestStub): + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + 
}, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(ExtensionExecutionServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { 
+ "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(ExtensionExecutionServiceRestStub): + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/exampleStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation(ExtensionExecutionServiceRestStub): + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the wait operation method over HTTP. + + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + 
"method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { 
+ "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", 
+ }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + 
}, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_wait_operation(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("ExtensionExecutionServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/__init__.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/__init__.py new file mode 100644 index 0000000000..249e2ae9b4 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import ExtensionRegistryServiceClient +from .async_client import ExtensionRegistryServiceAsyncClient + +__all__ = ( + "ExtensionRegistryServiceClient", + "ExtensionRegistryServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py new file mode 100644 index 0000000000..979799ffd2 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/async_client.py @@ -0,0 +1,1648 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.extension_registry_service import pagers +from google.cloud.aiplatform_v1beta1.types import extension +from google.cloud.aiplatform_v1beta1.types import extension as gca_extension +from google.cloud.aiplatform_v1beta1.types import extension_registry_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import tool +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import ExtensionRegistryServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ExtensionRegistryServiceGrpcAsyncIOTransport +from .client import 
ExtensionRegistryServiceClient + + +class ExtensionRegistryServiceAsyncClient: + """A service for managing Vertex AI's Extension registry.""" + + _client: ExtensionRegistryServiceClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = ExtensionRegistryServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ExtensionRegistryServiceClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = ( + ExtensionRegistryServiceClient._DEFAULT_ENDPOINT_TEMPLATE + ) + _DEFAULT_UNIVERSE = ExtensionRegistryServiceClient._DEFAULT_UNIVERSE + + extension_path = staticmethod(ExtensionRegistryServiceClient.extension_path) + parse_extension_path = staticmethod( + ExtensionRegistryServiceClient.parse_extension_path + ) + secret_version_path = staticmethod( + ExtensionRegistryServiceClient.secret_version_path + ) + parse_secret_version_path = staticmethod( + ExtensionRegistryServiceClient.parse_secret_version_path + ) + service_path = staticmethod(ExtensionRegistryServiceClient.service_path) + parse_service_path = staticmethod(ExtensionRegistryServiceClient.parse_service_path) + common_billing_account_path = staticmethod( + ExtensionRegistryServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + ExtensionRegistryServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(ExtensionRegistryServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + ExtensionRegistryServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + ExtensionRegistryServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + ExtensionRegistryServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod( + ExtensionRegistryServiceClient.common_project_path + ) + parse_common_project_path = staticmethod( + 
ExtensionRegistryServiceClient.parse_common_project_path + ) + common_location_path = staticmethod( + ExtensionRegistryServiceClient.common_location_path + ) + parse_common_location_path = staticmethod( + ExtensionRegistryServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ExtensionRegistryServiceAsyncClient: The constructed client. + """ + return ExtensionRegistryServiceClient.from_service_account_info.__func__(ExtensionRegistryServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ExtensionRegistryServiceAsyncClient: The constructed client. + """ + return ExtensionRegistryServiceClient.from_service_account_file.__func__(ExtensionRegistryServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. 
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return ExtensionRegistryServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> ExtensionRegistryServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ExtensionRegistryServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. 
+ """ + return self._client._universe_domain + + get_transport_class = functools.partial( + type(ExtensionRegistryServiceClient).get_transport_class, + type(ExtensionRegistryServiceClient), + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ExtensionRegistryServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the extension registry service async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ExtensionRegistryServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. 
If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ExtensionRegistryServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def import_extension( + self, + request: Optional[ + Union[extension_registry_service.ImportExtensionRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + extension: Optional[gca_extension.Extension] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Imports an Extension. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_import_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient() + + # Initialize request argument(s) + extension = aiplatform_v1beta1.Extension() + extension.display_name = "display_name_value" + extension.manifest.name = "name_value" + extension.manifest.description = "description_value" + extension.manifest.api_spec.open_api_yaml = "open_api_yaml_value" + extension.manifest.auth_config.api_key_config.name = "name_value" + extension.manifest.auth_config.api_key_config.api_key_secret = "api_key_secret_value" + extension.manifest.auth_config.api_key_config.http_element_location = "HTTP_IN_COOKIE" + + request = aiplatform_v1beta1.ImportExtensionRequest( + parent="parent_value", + extension=extension, + ) + + # Make the request + operation = client.import_extension(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.ImportExtensionRequest, dict]]): + The request object. Request message for + [ExtensionRegistryService.ImportExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ImportExtension]. + parent (:class:`str`): + Required. The resource name of the Location to import + the Extension in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + extension (:class:`google.cloud.aiplatform_v1beta1.types.Extension`): + Required. The Extension to import. 
+ This corresponds to the ``extension`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Extension` Extensions are tools for large language models to access external data, run + computations, etc. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, extension]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = extension_registry_service.ImportExtensionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if extension is not None: + request.extension = extension + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_extension, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_extension.Extension, + metadata_type=extension_registry_service.ImportExtensionOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_extension( + self, + request: Optional[ + Union[extension_registry_service.GetExtensionRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> extension.Extension: + r"""Gets an Extension. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_get_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetExtensionRequest( + name="name_value", + ) + + # Make the request + response = await client.get_extension(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.GetExtensionRequest, dict]]): + The request object. Request message for + [ExtensionRegistryService.GetExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.GetExtension]. + name (:class:`str`): + Required. The name of the Extension resource. 
Format: + ``projects/{project}/locations/{location}/extensions/{extension}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Extension: + Extensions are tools for large + language models to access external data, + run computations, etc. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = extension_registry_service.GetExtensionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_extension, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_extensions( + self, + request: Optional[ + Union[extension_registry_service.ListExtensionsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExtensionsAsyncPager: + r"""Lists Extensions in a location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_list_extensions(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListExtensionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_extensions(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.ListExtensionsRequest, dict]]): + The request object. Request message for + [ExtensionRegistryService.ListExtensions][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ListExtensions]. + parent (:class:`str`): + Required. The resource name of the Location to list the + Extensions from. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.extension_registry_service.pagers.ListExtensionsAsyncPager: + Response message for + [ExtensionRegistryService.ListExtensions][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ListExtensions] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = extension_registry_service.ListExtensionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_extensions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
+ response = pagers.ListExtensionsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_extension( + self, + request: Optional[ + Union[extension_registry_service.UpdateExtensionRequest, dict] + ] = None, + *, + extension: Optional[gca_extension.Extension] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_extension.Extension: + r"""Updates an Extension. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_update_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient() + + # Initialize request argument(s) + extension = aiplatform_v1beta1.Extension() + extension.display_name = "display_name_value" + extension.manifest.name = "name_value" + extension.manifest.description = "description_value" + extension.manifest.api_spec.open_api_yaml = "open_api_yaml_value" + extension.manifest.auth_config.api_key_config.name = "name_value" + extension.manifest.auth_config.api_key_config.api_key_secret = "api_key_secret_value" + extension.manifest.auth_config.api_key_config.http_element_location = "HTTP_IN_COOKIE" + + request = aiplatform_v1beta1.UpdateExtensionRequest( + extension=extension, + ) + + # Make the request + response = await client.update_extension(request=request) + + # Handle the response + print(response) + + 
Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.UpdateExtensionRequest, dict]]): + The request object. Request message for + [ExtensionRegistryService.UpdateExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.UpdateExtension]. + extension (:class:`google.cloud.aiplatform_v1beta1.types.Extension`): + Required. The Extension which + replaces the resource on the server. + + This corresponds to the ``extension`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. Mask specifying which fields to update. + Supported fields: + + :: + + * `display_name` + * `description` + * `tool_use_examples` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Extension: + Extensions are tools for large + language models to access external data, + run computations, etc. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([extension, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = extension_registry_service.UpdateExtensionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if extension is not None: + request.extension = extension + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_extension, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("extension.name", request.extension.name),) + ), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_extension( + self, + request: Optional[ + Union[extension_registry_service.DeleteExtensionRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes an Extension. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteExtensionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_extension(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.DeleteExtensionRequest, dict]]): + The request object. Request message for + [ExtensionRegistryService.DeleteExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.DeleteExtension]. + name (:class:`str`): + Required. The name of the Extension resource to be + deleted. Format: + ``projects/{project}/locations/{location}/extensions/{extension}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = extension_registry_service.DeleteExtensionRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_extension, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def __aenter__(self) -> "ExtensionRegistryServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("ExtensionRegistryServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/client.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/client.py new file mode 100644 index 0000000000..acf469f12b --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/client.py @@ -0,0 +1,2101 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.extension_registry_service import pagers +from google.cloud.aiplatform_v1beta1.types import extension +from google.cloud.aiplatform_v1beta1.types import extension as gca_extension +from google.cloud.aiplatform_v1beta1.types import extension_registry_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import tool +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # 
type: ignore +from .transports.base import ExtensionRegistryServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import ExtensionRegistryServiceGrpcTransport +from .transports.grpc_asyncio import ExtensionRegistryServiceGrpcAsyncIOTransport +from .transports.rest import ExtensionRegistryServiceRestTransport + + +class ExtensionRegistryServiceClientMeta(type): + """Metaclass for the ExtensionRegistryService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[ExtensionRegistryServiceTransport]] + _transport_registry["grpc"] = ExtensionRegistryServiceGrpcTransport + _transport_registry["grpc_asyncio"] = ExtensionRegistryServiceGrpcAsyncIOTransport + _transport_registry["rest"] = ExtensionRegistryServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[ExtensionRegistryServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ExtensionRegistryServiceClient(metaclass=ExtensionRegistryServiceClientMeta): + """A service for managing Vertex AI's Extension registry.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. 
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}"
+ _DEFAULT_UNIVERSE = "googleapis.com"
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ ExtensionRegistryServiceClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ ExtensionRegistryServiceClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ExtensionRegistryServiceTransport: + """Returns the transport used by the client instance. + + Returns: + ExtensionRegistryServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def extension_path( + project: str, + location: str, + extension: str, + ) -> str: + """Returns a fully-qualified extension string.""" + return "projects/{project}/locations/{location}/extensions/{extension}".format( + project=project, + location=location, + extension=extension, + ) + + @staticmethod + def parse_extension_path(path: str) -> Dict[str, str]: + """Parses a extension path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/extensions/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def secret_version_path( + project: str, + secret: str, + secret_version: str, + ) -> str: + """Returns a fully-qualified secret_version string.""" + return "projects/{project}/secrets/{secret}/versions/{secret_version}".format( + project=project, + secret=secret, + secret_version=secret_version, + ) + + @staticmethod + def parse_secret_version_path(path: str) -> Dict[str, str]: + """Parses a secret_version path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/secrets/(?P.+?)/versions/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def service_path( + project: str, + location: str, + namespace: str, + service: str, + ) -> str: + """Returns a fully-qualified service string.""" + return "projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}".format( + project=project, + location=location, + namespace=namespace, + service=service, + ) + + @staticmethod + 
def parse_service_path(path: str) -> Dict[str, str]:
+ """Parses a service path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/namespaces/(?P<namespace>.+?)/services/(?P<service>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(
+ billing_account: str,
+ ) -> str:
+ """Returns a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(
+ folder: str,
+ ) -> str:
+ """Returns a fully-qualified folder string."""
+ return "folders/{folder}".format(
+ folder=folder,
+ )
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(
+ organization: str,
+ ) -> str:
+ """Returns a fully-qualified organization string."""
+ return "organizations/{organization}".format(
+ organization=organization,
+ )
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(
+ project: str,
+ ) -> str:
+ """Returns a fully-qualified project string."""
+ return "projects/{project}".format(
+ project=project,
+ )
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod 
+ def common_location_path(
+ project: str,
+ location: str,
+ ) -> str:
+ """Returns a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project,
+ location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(
+ cls, client_options: Optional[client_options_lib.ClientOptions] = None
+ ):
+ """Deprecated. Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` if provided, use the provided one.
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
+
+ Returns:
+ Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+ client cert source to use.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If any errors happen. 
+ """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. 
+ """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". 
+ + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = ExtensionRegistryServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = ExtensionRegistryServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = ( + ExtensionRegistryServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = ExtensionRegistryServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. 
+ + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = ExtensionRegistryServiceClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or ExtensionRegistryServiceClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, ExtensionRegistryServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the extension registry service client. 
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ExtensionRegistryServiceTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+                NOTE: "rest" transport functionality is currently in a
+                beta state (preview). We welcome your feedback via an
+                issue in this library's source repository.
+            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]):
+                Custom options for the client.
+
+                1. The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client when ``transport`` is
+                not explicitly provided. Only if this property is not set and
+                ``transport`` was not explicitly provided, the endpoint is
+                determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment
+                variable, which can have one of the following values:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto-switch to the
+                default mTLS endpoint if client certificate is present; this is
+                the default value).
+
+                2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide a client certificate for mTLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+
+                3. The ``universe_domain`` property can be used to override the
+                default "googleapis.com" universe. Note that the ``api_endpoint``
+                property still takes precedence; and ``universe_domain`` is
+                currently not supported for mTLS.
+ + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = ExtensionRegistryServiceClient._read_environment_variables() + self._client_cert_source = ( + ExtensionRegistryServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + ) + self._universe_domain = ExtensionRegistryServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, ExtensionRegistryServiceTransport) + if transport_provided: + # transport is a ExtensionRegistryServiceTransport instance. 
+ if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = cast(ExtensionRegistryServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or ExtensionRegistryServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(cast(str, transport)) + self._transport = Transport( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + def import_extension( + self, + request: Optional[ + Union[extension_registry_service.ImportExtensionRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + extension: Optional[gca_extension.Extension] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Imports an Extension. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_import_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceClient() + + # Initialize request argument(s) + extension = aiplatform_v1beta1.Extension() + extension.display_name = "display_name_value" + extension.manifest.name = "name_value" + extension.manifest.description = "description_value" + extension.manifest.api_spec.open_api_yaml = "open_api_yaml_value" + extension.manifest.auth_config.api_key_config.name = "name_value" + extension.manifest.auth_config.api_key_config.api_key_secret = "api_key_secret_value" + extension.manifest.auth_config.api_key_config.http_element_location = "HTTP_IN_COOKIE" + + request = aiplatform_v1beta1.ImportExtensionRequest( + parent="parent_value", + extension=extension, + ) + + # Make the request + operation = client.import_extension(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ImportExtensionRequest, dict]): + The request object. Request message for + [ExtensionRegistryService.ImportExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ImportExtension]. + parent (str): + Required. The resource name of the Location to import + the Extension in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + extension (google.cloud.aiplatform_v1beta1.types.Extension): + Required. The Extension to import. 
+                This corresponds to the ``extension`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Extension` Extensions are tools for large language models to access external data, run
+                computations, etc.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, extension])
+        if request is not None and has_flattened_params:
+            raise ValueError(
+                "If the `request` argument is set, then none of "
+                "the individual field arguments should be set."
+            )
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in an extension_registry_service.ImportExtensionRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, extension_registry_service.ImportExtensionRequest):
+            request = extension_registry_service.ImportExtensionRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if parent is not None:
+            request.parent = parent
+        if extension is not None:
+            request.extension = extension
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.import_extension]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_extension.Extension, + metadata_type=extension_registry_service.ImportExtensionOperationMetadata, + ) + + # Done; return the response. + return response + + def get_extension( + self, + request: Optional[ + Union[extension_registry_service.GetExtensionRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> extension.Extension: + r"""Gets an Extension. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_get_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetExtensionRequest( + name="name_value", + ) + + # Make the request + response = client.get_extension(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetExtensionRequest, dict]): + The request object. 
Request message for + [ExtensionRegistryService.GetExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.GetExtension]. + name (str): + Required. The name of the Extension resource. Format: + ``projects/{project}/locations/{location}/extensions/{extension}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Extension: + Extensions are tools for large + language models to access external data, + run computations, etc. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a extension_registry_service.GetExtensionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, extension_registry_service.GetExtensionRequest): + request = extension_registry_service.GetExtensionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_extension] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_extensions( + self, + request: Optional[ + Union[extension_registry_service.ListExtensionsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListExtensionsPager: + r"""Lists Extensions in a location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_list_extensions(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListExtensionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_extensions(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListExtensionsRequest, dict]): + The request object. Request message for + [ExtensionRegistryService.ListExtensions][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ListExtensions]. + parent (str): + Required. The resource name of the Location to list the + Extensions from. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.extension_registry_service.pagers.ListExtensionsPager: + Response message for + [ExtensionRegistryService.ListExtensions][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ListExtensions] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a extension_registry_service.ListExtensionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, extension_registry_service.ListExtensionsRequest): + request = extension_registry_service.ListExtensionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_extensions] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListExtensionsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_extension( + self, + request: Optional[ + Union[extension_registry_service.UpdateExtensionRequest, dict] + ] = None, + *, + extension: Optional[gca_extension.Extension] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_extension.Extension: + r"""Updates an Extension. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_update_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceClient() + + # Initialize request argument(s) + extension = aiplatform_v1beta1.Extension() + extension.display_name = "display_name_value" + extension.manifest.name = "name_value" + extension.manifest.description = "description_value" + extension.manifest.api_spec.open_api_yaml = "open_api_yaml_value" + extension.manifest.auth_config.api_key_config.name = "name_value" + extension.manifest.auth_config.api_key_config.api_key_secret = "api_key_secret_value" + extension.manifest.auth_config.api_key_config.http_element_location = "HTTP_IN_COOKIE" + + request = aiplatform_v1beta1.UpdateExtensionRequest( + extension=extension, + ) + + # Make the request + response = client.update_extension(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UpdateExtensionRequest, dict]): + The request object. Request message for + [ExtensionRegistryService.UpdateExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.UpdateExtension]. + extension (google.cloud.aiplatform_v1beta1.types.Extension): + Required. The Extension which + replaces the resource on the server. + + This corresponds to the ``extension`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Mask specifying which fields to update. + Supported fields: + + :: + + * `display_name` + * `description` + * `tool_use_examples` + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.Extension: + Extensions are tools for large + language models to access external data, + run computations, etc. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([extension, update_mask]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a extension_registry_service.UpdateExtensionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, extension_registry_service.UpdateExtensionRequest): + request = extension_registry_service.UpdateExtensionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if extension is not None: + request.extension = extension + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_extension] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata( + (("extension.name", request.extension.name),) + ), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_extension( + self, + request: Optional[ + Union[extension_registry_service.DeleteExtensionRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes an Extension. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_delete_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteExtensionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_extension(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteExtensionRequest, dict]): + The request object. Request message for + [ExtensionRegistryService.DeleteExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.DeleteExtension]. + name (str): + Required. The name of the Extension resource to be + deleted. Format: + ``projects/{project}/locations/{location}/extensions/{extension}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a extension_registry_service.DeleteExtensionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, extension_registry_service.DeleteExtensionRequest): + request = extension_registry_service.DeleteExtensionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_extension] + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "ExtensionRegistryServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
+ if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). 
A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. + + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide <https://cloud.google.com/iam/docs>`__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("ExtensionRegistryServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/pagers.py new file mode 100644 index 0000000000..0f9616c706 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/pagers.py @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, +) + +from google.cloud.aiplatform_v1beta1.types import extension +from google.cloud.aiplatform_v1beta1.types import extension_registry_service + + +class ListExtensionsPager: + """A pager for iterating through ``list_extensions`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListExtensionsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``extensions`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListExtensions`` requests and continue to iterate + through the ``extensions`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListExtensionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., extension_registry_service.ListExtensionsResponse], + request: extension_registry_service.ListExtensionsRequest, + response: extension_registry_service.ListExtensionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListExtensionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListExtensionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = extension_registry_service.ListExtensionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[extension_registry_service.ListExtensionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[extension.Extension]: + for page in self.pages: + yield from page.extensions + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListExtensionsAsyncPager: + """A pager for iterating through ``list_extensions`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListExtensionsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``extensions`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListExtensions`` requests and continue to iterate + through the ``extensions`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListExtensionsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[extension_registry_service.ListExtensionsResponse] + ], + request: extension_registry_service.ListExtensionsRequest, + response: extension_registry_service.ListExtensionsResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListExtensionsRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListExtensionsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = extension_registry_service.ListExtensionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[extension_registry_service.ListExtensionsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[extension.Extension]: + async def async_generator(): + async for page in self.pages: + for response in page.extensions: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/__init__.py new file mode 100644 index 0000000000..8514e0583d --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/__init__.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ExtensionRegistryServiceTransport +from .grpc import ExtensionRegistryServiceGrpcTransport +from .grpc_asyncio import ExtensionRegistryServiceGrpcAsyncIOTransport +from .rest import ExtensionRegistryServiceRestTransport +from .rest import ExtensionRegistryServiceRestInterceptor + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[ExtensionRegistryServiceTransport]] +_transport_registry["grpc"] = ExtensionRegistryServiceGrpcTransport +_transport_registry["grpc_asyncio"] = ExtensionRegistryServiceGrpcAsyncIOTransport +_transport_registry["rest"] = ExtensionRegistryServiceRestTransport + +__all__ = ( + "ExtensionRegistryServiceTransport", + "ExtensionRegistryServiceGrpcTransport", + "ExtensionRegistryServiceGrpcAsyncIOTransport", + "ExtensionRegistryServiceRestTransport", + "ExtensionRegistryServiceRestInterceptor", +) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/base.py new file mode 100644 index 0000000000..14a05bb861 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/base.py @@ -0,0 +1,324 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import extension +from google.cloud.aiplatform_v1beta1.types import extension as gca_extension +from google.cloud.aiplatform_v1beta1.types import extension_registry_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class ExtensionRegistryServiceTransport(abc.ABC): + """Abstract transport class for ExtensionRegistryService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.import_extension: gapic_v1.method.wrap_method( + self.import_extension, + default_timeout=None, + client_info=client_info, + ), + self.get_extension: gapic_v1.method.wrap_method( + self.get_extension, + default_timeout=None, + client_info=client_info, + ), + self.list_extensions: gapic_v1.method.wrap_method( + self.list_extensions, + default_timeout=None, + client_info=client_info, + ), + self.update_extension: gapic_v1.method.wrap_method( + self.update_extension, + default_timeout=None, + client_info=client_info, + ), + self.delete_extension: gapic_v1.method.wrap_method( + self.delete_extension, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def import_extension( + self, + ) -> Callable[ + [extension_registry_service.ImportExtensionRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_extension( + self, + ) -> Callable[ + [extension_registry_service.GetExtensionRequest], + Union[extension.Extension, Awaitable[extension.Extension]], + ]: + raise NotImplementedError() + + @property + def list_extensions( + self, + ) -> Callable[ + [extension_registry_service.ListExtensionsRequest], + Union[ + extension_registry_service.ListExtensionsResponse, + Awaitable[extension_registry_service.ListExtensionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def update_extension( + self, + ) -> Callable[ + [extension_registry_service.UpdateExtensionRequest], + Union[gca_extension.Extension, Awaitable[gca_extension.Extension]], + ]: + raise NotImplementedError() + + @property + def delete_extension( + self, + ) -> Callable[ + [extension_registry_service.DeleteExtensionRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> 
Callable[[operations_pb2.DeleteOperationRequest], None,]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("ExtensionRegistryServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc.py new file mode 100644 index 0000000000..6fe0ebaba4 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc.py @@ -0,0 +1,606 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); 
+# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import extension +from google.cloud.aiplatform_v1beta1.types import extension as gca_extension +from google.cloud.aiplatform_v1beta1.types import extension_registry_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import ExtensionRegistryServiceTransport, DEFAULT_CLIENT_INFO + + +class ExtensionRegistryServiceGrpcTransport(ExtensionRegistryServiceTransport): + """gRPC backend transport for ExtensionRegistryService. + + A service for managing Vertex AI's Extension registry. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. 
+ self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def import_extension( + self, + ) -> Callable[ + [extension_registry_service.ImportExtensionRequest], operations_pb2.Operation + ]: + r"""Return a callable for the import extension method over gRPC. + + Imports an Extension. + + Returns: + Callable[[~.ImportExtensionRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "import_extension" not in self._stubs: + self._stubs["import_extension"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionRegistryService/ImportExtension", + request_serializer=extension_registry_service.ImportExtensionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["import_extension"] + + @property + def get_extension( + self, + ) -> Callable[ + [extension_registry_service.GetExtensionRequest], extension.Extension + ]: + r"""Return a callable for the get extension method over gRPC. + + Gets an Extension. + + Returns: + Callable[[~.GetExtensionRequest], + ~.Extension]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_extension" not in self._stubs: + self._stubs["get_extension"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionRegistryService/GetExtension", + request_serializer=extension_registry_service.GetExtensionRequest.serialize, + response_deserializer=extension.Extension.deserialize, + ) + return self._stubs["get_extension"] + + @property + def list_extensions( + self, + ) -> Callable[ + [extension_registry_service.ListExtensionsRequest], + extension_registry_service.ListExtensionsResponse, + ]: + r"""Return a callable for the list extensions method over gRPC. + + Lists Extensions in a location. + + Returns: + Callable[[~.ListExtensionsRequest], + ~.ListExtensionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_extensions" not in self._stubs: + self._stubs["list_extensions"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionRegistryService/ListExtensions", + request_serializer=extension_registry_service.ListExtensionsRequest.serialize, + response_deserializer=extension_registry_service.ListExtensionsResponse.deserialize, + ) + return self._stubs["list_extensions"] + + @property + def update_extension( + self, + ) -> Callable[ + [extension_registry_service.UpdateExtensionRequest], gca_extension.Extension + ]: + r"""Return a callable for the update extension method over gRPC. + + Updates an Extension. + + Returns: + Callable[[~.UpdateExtensionRequest], + ~.Extension]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_extension" not in self._stubs: + self._stubs["update_extension"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionRegistryService/UpdateExtension", + request_serializer=extension_registry_service.UpdateExtensionRequest.serialize, + response_deserializer=gca_extension.Extension.deserialize, + ) + return self._stubs["update_extension"] + + @property + def delete_extension( + self, + ) -> Callable[ + [extension_registry_service.DeleteExtensionRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete extension method over gRPC. + + Deletes an Extension. + + Returns: + Callable[[~.DeleteExtensionRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_extension" not in self._stubs: + self._stubs["delete_extension"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionRegistryService/DeleteExtension", + request_serializer=extension_registry_service.DeleteExtensionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_extension"] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs:
+ self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+ "/google.longrunning.Operations/CancelOperation",
+ request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["cancel_operation"]
+
+ @property
+ def wait_operation(
+ self,
+ ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+ r"""Return a callable for the wait_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "wait_operation" not in self._stubs:
+ self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+ "/google.longrunning.Operations/WaitOperation",
+ request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["wait_operation"]
+
+ @property
+ def get_operation(
+ self,
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+ r"""Return a callable for the get_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs:
+ self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+ "/google.cloud.location.Locations/ListLocations",
+ request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+ response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+ )
+ return self._stubs["list_locations"]
+
+ @property
+ def get_location(
+ self,
+ ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+ r"""Return a callable for the get location method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_location" not in self._stubs:
+ self._stubs["get_location"] = self.grpc_channel.unary_unary(
+ "/google.cloud.location.Locations/GetLocation",
+ request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+ response_deserializer=locations_pb2.Location.FromString,
+ )
+ return self._stubs["get_location"]
+
+ @property
+ def set_iam_policy(
+ self,
+ ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+ r"""Return a callable for the set iam policy method over gRPC.
+ Sets the IAM access control policy on the specified
+ function. Replaces any existing policy.
+ Returns:
+ Callable[[~.SetIamPolicyRequest],
+ ~.Policy]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("ExtensionRegistryServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..3e5afe111a --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/grpc_asyncio.py @@ -0,0 +1,610 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import extension +from google.cloud.aiplatform_v1beta1.types import extension as gca_extension +from google.cloud.aiplatform_v1beta1.types import extension_registry_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import ExtensionRegistryServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import ExtensionRegistryServiceGrpcTransport + + +class ExtensionRegistryServiceGrpcAsyncIOTransport(ExtensionRegistryServiceTransport): + """gRPC AsyncIO backend transport for ExtensionRegistryService. + + A service for managing Vertex AI's Extension registry. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. 
+ Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. 
+ """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def import_extension( + self, + ) -> Callable[ + [extension_registry_service.ImportExtensionRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the import extension method over gRPC. + + Imports an Extension. + + Returns: + Callable[[~.ImportExtensionRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "import_extension" not in self._stubs: + self._stubs["import_extension"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionRegistryService/ImportExtension", + request_serializer=extension_registry_service.ImportExtensionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["import_extension"] + + @property + def get_extension( + self, + ) -> Callable[ + [extension_registry_service.GetExtensionRequest], Awaitable[extension.Extension] + ]: + r"""Return a callable for the get extension method over gRPC. + + Gets an Extension. + + Returns: + Callable[[~.GetExtensionRequest], + Awaitable[~.Extension]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_extension" not in self._stubs: + self._stubs["get_extension"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionRegistryService/GetExtension", + request_serializer=extension_registry_service.GetExtensionRequest.serialize, + response_deserializer=extension.Extension.deserialize, + ) + return self._stubs["get_extension"] + + @property + def list_extensions( + self, + ) -> Callable[ + [extension_registry_service.ListExtensionsRequest], + Awaitable[extension_registry_service.ListExtensionsResponse], + ]: + r"""Return a callable for the list extensions method over gRPC. + + Lists Extensions in a location. + + Returns: + Callable[[~.ListExtensionsRequest], + Awaitable[~.ListExtensionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_extensions" not in self._stubs: + self._stubs["list_extensions"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionRegistryService/ListExtensions", + request_serializer=extension_registry_service.ListExtensionsRequest.serialize, + response_deserializer=extension_registry_service.ListExtensionsResponse.deserialize, + ) + return self._stubs["list_extensions"] + + @property + def update_extension( + self, + ) -> Callable[ + [extension_registry_service.UpdateExtensionRequest], + Awaitable[gca_extension.Extension], + ]: + r"""Return a callable for the update extension method over gRPC. + + Updates an Extension. + + Returns: + Callable[[~.UpdateExtensionRequest], + Awaitable[~.Extension]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "update_extension" not in self._stubs: + self._stubs["update_extension"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionRegistryService/UpdateExtension", + request_serializer=extension_registry_service.UpdateExtensionRequest.serialize, + response_deserializer=gca_extension.Extension.deserialize, + ) + return self._stubs["update_extension"] + + @property + def delete_extension( + self, + ) -> Callable[ + [extension_registry_service.DeleteExtensionRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete extension method over gRPC. + + Deletes an Extension. + + Returns: + Callable[[~.DeleteExtensionRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_extension" not in self._stubs: + self._stubs["delete_extension"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.ExtensionRegistryService/DeleteExtension", + request_serializer=extension_registry_service.DeleteExtensionRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_extension"] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "delete_operation" not in self._stubs:
+            self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/DeleteOperation",
+                request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["delete_operation"]
+
+    @property
+    def cancel_operation(
+        self,
+    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
+        r"""Return a callable for the cancel_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:  # was "delete_operation": broke caching
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("ExtensionRegistryServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/rest.py new file mode 100644 index 0000000000..fff925fb2a --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/extension_registry_service/transports/rest.py @@ -0,0 +1,5634 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import extension +from google.cloud.aiplatform_v1beta1.types import extension as gca_extension +from google.cloud.aiplatform_v1beta1.types import extension_registry_service +from google.longrunning import operations_pb2 # type: ignore + +from .base import ( + ExtensionRegistryServiceTransport, + DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, +) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class ExtensionRegistryServiceRestInterceptor: + """Interceptor for ExtensionRegistryService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. 
+ Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the ExtensionRegistryServiceRestTransport. + + .. code-block:: python + class MyCustomExtensionRegistryServiceInterceptor(ExtensionRegistryServiceRestInterceptor): + def pre_delete_extension(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_extension(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_extension(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_extension(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_import_extension(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_import_extension(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_extensions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_extensions(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_extension(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_extension(self, response): + logging.log(f"Received response: {response}") + return response + + transport = ExtensionRegistryServiceRestTransport(interceptor=MyCustomExtensionRegistryServiceInterceptor()) + client = ExtensionRegistryServiceClient(transport=transport) + + + """ + + def pre_delete_extension( + self, + request: extension_registry_service.DeleteExtensionRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + 
extension_registry_service.DeleteExtensionRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for delete_extension + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. + """ + return request, metadata + + def post_delete_extension( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_extension + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. + """ + return response + + def pre_get_extension( + self, + request: extension_registry_service.GetExtensionRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + extension_registry_service.GetExtensionRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for get_extension + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. + """ + return request, metadata + + def post_get_extension(self, response: extension.Extension) -> extension.Extension: + """Post-rpc interceptor for get_extension + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. + """ + return response + + def pre_import_extension( + self, + request: extension_registry_service.ImportExtensionRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + extension_registry_service.ImportExtensionRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for import_extension + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. 
+ """ + return request, metadata + + def post_import_extension( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for import_extension + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. + """ + return response + + def pre_list_extensions( + self, + request: extension_registry_service.ListExtensionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + extension_registry_service.ListExtensionsRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for list_extensions + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. + """ + return request, metadata + + def post_list_extensions( + self, response: extension_registry_service.ListExtensionsResponse + ) -> extension_registry_service.ListExtensionsResponse: + """Post-rpc interceptor for list_extensions + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. + """ + return response + + def pre_update_extension( + self, + request: extension_registry_service.UpdateExtensionRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + extension_registry_service.UpdateExtensionRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for update_extension + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. + """ + return request, metadata + + def post_update_extension( + self, response: gca_extension.Extension + ) -> gca_extension.Extension: + """Post-rpc interceptor for update_extension + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. 
+ """ + return response + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. 
+ """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. + """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. 
+ """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. + """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. + """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. 
+ """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. + """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. + """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.WaitOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the ExtensionRegistryService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the ExtensionRegistryService server but before + it is returned to user code. 
+ """ + return response + + +@dataclasses.dataclass +class ExtensionRegistryServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: ExtensionRegistryServiceRestInterceptor + + +class ExtensionRegistryServiceRestTransport(ExtensionRegistryServiceTransport): + """REST backend transport for ExtensionRegistryService. + + A service for managing Vertex AI's Extension registry. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via an issue in this + library's source repository. Thank you! + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[ExtensionRegistryServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via a GitHub issue in + this library's repository. Thank you! + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or ExtensionRegistryServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def operations_client(self) -> operations_v1.AbstractOperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Only create a new client if we do not already have one.
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:cancel", + }, 
+ { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + 
"uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + 
"uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", 
+ }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, 
+ { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", + }, + ], + "google.longrunning.Operations.WaitOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + 
"uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + 
"method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1beta1", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) + + # Return the client from cache. 
+ return self._operations_client + + class _DeleteExtension(ExtensionRegistryServiceRestStub): + def __hash__(self): + return hash("DeleteExtension") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: extension_registry_service.DeleteExtensionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete extension method over HTTP. + + Args: + request (~.extension_registry_service.DeleteExtensionRequest): + The request object. Request message for + [ExtensionRegistryService.DeleteExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.DeleteExtension]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_extension( + request, metadata + ) + pb_request = extension_registry_service.DeleteExtensionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_extension(resp) + return resp + + class _GetExtension(ExtensionRegistryServiceRestStub): + def __hash__(self): + return hash("GetExtension") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: extension_registry_service.GetExtensionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> extension.Extension: + r"""Call the get extension method over HTTP. + + Args: + request (~.extension_registry_service.GetExtensionRequest): + The request object. Request message for + [ExtensionRegistryService.GetExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.GetExtension]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.extension.Extension: + Extensions are tools for large + language models to access external data, + run computations, etc. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}", + }, + ] + request, metadata = self._interceptor.pre_get_extension(request, metadata) + pb_request = extension_registry_service.GetExtensionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = extension.Extension() + pb_resp = extension.Extension.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_extension(resp) + return resp + + class _ImportExtension(ExtensionRegistryServiceRestStub): + def __hash__(self): + return hash("ImportExtension") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: extension_registry_service.ImportExtensionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the import extension method over HTTP. + + Args: + request (~.extension_registry_service.ImportExtensionRequest): + The request object. Request message for + [ExtensionRegistryService.ImportExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ImportExtension]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{parent=projects/*/locations/*}/extensions:import", + "body": "extension", + }, + ] + request, metadata = self._interceptor.pre_import_extension( + request, metadata + ) + pb_request = extension_registry_service.ImportExtensionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_import_extension(resp) + return resp + + class _ListExtensions(ExtensionRegistryServiceRestStub): + def __hash__(self): + return hash("ListExtensions") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: extension_registry_service.ListExtensionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> extension_registry_service.ListExtensionsResponse: + r"""Call the list extensions method over HTTP. + + Args: + request (~.extension_registry_service.ListExtensionsRequest): + The request object. Request message for + [ExtensionRegistryService.ListExtensions][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ListExtensions]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.extension_registry_service.ListExtensionsResponse: + Response message for + [ExtensionRegistryService.ListExtensions][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ListExtensions] + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{parent=projects/*/locations/*}/extensions", + }, + ] + request, metadata = self._interceptor.pre_list_extensions(request, metadata) + pb_request = extension_registry_service.ListExtensionsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = extension_registry_service.ListExtensionsResponse() + pb_resp = extension_registry_service.ListExtensionsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_extensions(resp) + return resp + + class _UpdateExtension(ExtensionRegistryServiceRestStub): + def __hash__(self): + return hash("UpdateExtension") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask": {}, + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: extension_registry_service.UpdateExtensionRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gca_extension.Extension: + r"""Call the update extension method over HTTP. + + Args: + request (~.extension_registry_service.UpdateExtensionRequest): + The request object. Request message for + [ExtensionRegistryService.UpdateExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.UpdateExtension]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gca_extension.Extension: + Extensions are tools for large + language models to access external data, + run computations, etc. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "patch", + "uri": "/v1beta1/{extension.name=projects/*/locations/*/extensions/*}", + "body": "extension", + }, + ] + request, metadata = self._interceptor.pre_update_extension( + request, metadata + ) + pb_request = extension_registry_service.UpdateExtensionRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gca_extension.Extension() + pb_resp = gca_extension.Extension.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_extension(resp) + return resp + + @property + def delete_extension( + self, + ) -> Callable[ + [extension_registry_service.DeleteExtensionRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._DeleteExtension(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_extension( + self, + ) -> Callable[ + [extension_registry_service.GetExtensionRequest], extension.Extension + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetExtension(self._session, self._host, self._interceptor) # type: ignore + + @property + def import_extension( + self, + ) -> Callable[ + [extension_registry_service.ImportExtensionRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ImportExtension(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_extensions( + self, + ) -> Callable[ + [extension_registry_service.ListExtensionsRequest], + extension_registry_service.ListExtensionsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListExtensions(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_extension( + self, + ) -> Callable[ + [extension_registry_service.UpdateExtensionRequest], gca_extension.Extension + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._UpdateExtension(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation(ExtensionRegistryServiceRestStub): + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_location(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.Location() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_location(resp) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations(ExtensionRegistryServiceRestStub): + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + + r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*}/locations", + }, + ] + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_locations(resp) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy(ExtensionRegistryServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the get iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + 
"method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + ] + + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy(ExtensionRegistryServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the set iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": 
"/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + ] + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions(ExtensionRegistryServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + ] + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, 
**request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(ExtensionRegistryServiceRestStub): + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:cancel", + }, 
+ { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + 
"uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation(ExtensionRegistryServiceRestStub): + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + 
}, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(ExtensionRegistryServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { 
+ "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(ExtensionRegistryServiceRestStub): + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/exampleStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation(ExtensionRegistryServiceRestStub): + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the wait operation method over HTTP. 
+ + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": 
"post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + 
headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_wait_operation(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("ExtensionRegistryServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py index 78405c2393..d35ee19662 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_online_store_admin_service/transports/rest.py @@ -985,6 +985,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1315,6 +1319,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": 
"/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1693,6 +1701,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2079,6 +2091,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2323,10 +2339,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2457,6 +2469,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -4768,6 +4784,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5155,6 +5175,10 @@ def __call__( "method": 
"delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5593,6 +5617,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -6040,6 +6068,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -6284,10 +6316,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -6479,6 +6507,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py index 5fd1cf755d..109f8a8156 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py +++ 
b/google/cloud/aiplatform_v1beta1/services/feature_online_store_service/transports/rest.py @@ -1394,6 +1394,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1781,6 +1785,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2219,6 +2227,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2666,6 +2678,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2910,10 +2926,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -3105,6 +3117,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest.py index c09f80679f..be73a557c9 100644 --- a/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/feature_registry_service/transports/rest.py @@ -856,6 +856,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1186,6 +1190,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1564,6 +1572,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1950,6 +1962,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + 
"uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2194,10 +2210,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2328,6 +2340,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -4268,6 +4284,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4655,6 +4675,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5093,6 +5117,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5540,6 +5568,10 @@ def __call__( 
"method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5784,10 +5816,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -5979,6 +6007,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py index 791a4c82a5..f4e1f8aa7a 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_online_serving_service/transports/rest.py @@ -1509,6 +1509,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1896,6 +1900,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2334,6 +2342,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2781,6 +2793,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -3025,10 +3041,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -3220,6 +3232,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py index 9322dafcad..0aa3f8bf2c 100644 --- a/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/featurestore_service/transports/rest.py @@ -1205,6 +1205,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", 
"uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1535,6 +1539,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1913,6 +1921,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2299,6 +2311,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2543,10 +2559,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2677,6 +2689,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5748,6 +5764,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -6135,6 +6155,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -6573,6 +6597,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -7020,6 +7048,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -7264,10 +7296,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -7459,6 +7487,10 @@ def 
__call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py index 082627fc3d..46d1bb8b0c 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/index_endpoint_service/transports/rest.py @@ -796,6 +796,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1126,6 +1130,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1504,6 +1512,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1890,6 +1902,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2134,10 +2150,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2268,6 +2280,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -4017,6 +4033,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4404,6 +4424,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4842,6 +4866,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5289,6 +5317,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5533,10 +5565,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -5728,6 +5756,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/index_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/index_service/transports/rest.py index a352664aa6..817a783095 100644 --- a/google/cloud/aiplatform_v1beta1/services/index_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/index_service/transports/rest.py @@ -748,6 +748,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1078,6 +1082,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1456,6 +1464,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1842,6 +1854,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2086,10 +2102,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2220,6 +2232,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3846,6 +3862,10 @@ def __call__( "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4233,6 +4253,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4671,6 +4695,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5118,6 +5146,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5362,10 +5394,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -5557,6 +5585,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git 
a/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py index 131089b979..e68b1159e1 100644 --- a/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/job_service/transports/rest.py @@ -1554,6 +1554,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1884,6 +1888,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2262,6 +2270,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2648,6 +2660,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2892,10 +2908,6 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3026,6 +3038,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -7469,6 +7485,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -7856,6 +7876,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -8294,6 +8318,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -8741,6 +8769,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", 
"uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -8985,10 +9017,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -9180,6 +9208,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/rest.py index 859de16f02..1b4290be1e 100644 --- a/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/llm_utility_service/transports/rest.py @@ -1219,6 +1219,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1606,6 +1610,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2044,6 +2052,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2491,6 +2503,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2735,10 +2751,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -2930,6 +2942,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/match_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/match_service/transports/rest.py index 1e8ed0be11..d749aa4613 100644 --- a/google/cloud/aiplatform_v1beta1/services/match_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/match_service/transports/rest.py @@ -1348,6 +1348,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1735,6 +1739,10 @@ def __call__( "method": "delete", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2173,6 +2181,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2620,6 +2632,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2864,10 +2880,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -3059,6 +3071,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py index 86e9dd320d..c0d43fde79 100644 --- a/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/metadata_service/transports/rest.py @@ -1538,6 
+1538,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1868,6 +1872,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2246,6 +2254,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2632,6 +2644,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2876,10 +2892,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3010,6 +3022,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -7127,6 +7143,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -7514,6 +7534,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -7952,6 +7976,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -8399,6 +8427,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -8643,10 +8675,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = 
self._interceptor.pre_list_operations(request, metadata) @@ -8838,6 +8866,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest.py index eaa613f650..73f43ffadb 100644 --- a/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/migration_service/transports/rest.py @@ -601,6 +601,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -931,6 +935,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1309,6 +1317,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1695,6 +1707,10 
@@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -1939,10 +1955,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2073,6 +2085,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3217,6 +3233,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3604,6 +3624,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4042,6 +4066,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4489,6 +4517,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4733,10 +4765,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -4928,6 +4956,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py index aaad4dff72..a7125e9620 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/model_garden_service/transports/rest.py @@ -1341,6 +1341,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1728,6 +1732,10 @@ def __call__( "method": "delete", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2166,6 +2174,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2613,6 +2625,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2857,10 +2873,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -3052,6 +3064,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py index e608dac087..0848c65b34 100644 --- a/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/model_service/transports/rest.py @@ -1098,6 +1098,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1428,6 +1432,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1806,6 +1814,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2192,6 +2204,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2436,10 +2452,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2570,6 +2582,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -5314,6 +5330,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5701,6 +5721,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -6139,6 +6163,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -6586,6 +6614,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -6830,10 +6862,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = 
self._interceptor.pre_list_operations(request, metadata) @@ -7025,6 +7053,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/__init__.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/__init__.py new file mode 100644 index 0000000000..4e038695c2 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import NotebookServiceClient +from .async_client import NotebookServiceAsyncClient + +__all__ = ( + "NotebookServiceClient", + "NotebookServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py new file mode 100644 index 0000000000..8bb9d123b5 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/async_client.py @@ -0,0 +1,2318 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.notebook_service import pagers +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import network_spec +from google.cloud.aiplatform_v1beta1.types import notebook_euc_config +from google.cloud.aiplatform_v1beta1.types import notebook_idle_shutdown_config +from google.cloud.aiplatform_v1beta1.types import notebook_runtime +from google.cloud.aiplatform_v1beta1.types import ( + notebook_runtime as gca_notebook_runtime, +) +from google.cloud.aiplatform_v1beta1.types import notebook_runtime_template_ref +from google.cloud.aiplatform_v1beta1.types import notebook_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: 
ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import NotebookServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import NotebookServiceGrpcAsyncIOTransport +from .client import NotebookServiceClient + + +class NotebookServiceAsyncClient: + """The interface for Vertex Notebook service (a.k.a. Colab on + Workbench). + """ + + _client: NotebookServiceClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = NotebookServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = NotebookServiceClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = NotebookServiceClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = NotebookServiceClient._DEFAULT_UNIVERSE + + network_path = staticmethod(NotebookServiceClient.network_path) + parse_network_path = staticmethod(NotebookServiceClient.parse_network_path) + notebook_runtime_path = staticmethod(NotebookServiceClient.notebook_runtime_path) + parse_notebook_runtime_path = staticmethod( + NotebookServiceClient.parse_notebook_runtime_path + ) + notebook_runtime_template_path = staticmethod( + NotebookServiceClient.notebook_runtime_template_path + ) + parse_notebook_runtime_template_path = staticmethod( + NotebookServiceClient.parse_notebook_runtime_template_path + ) + subnetwork_path = staticmethod(NotebookServiceClient.subnetwork_path) + parse_subnetwork_path = staticmethod(NotebookServiceClient.parse_subnetwork_path) + common_billing_account_path = staticmethod( + NotebookServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + NotebookServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(NotebookServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + NotebookServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + 
NotebookServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + NotebookServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(NotebookServiceClient.common_project_path) + parse_common_project_path = staticmethod( + NotebookServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(NotebookServiceClient.common_location_path) + parse_common_location_path = staticmethod( + NotebookServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + NotebookServiceAsyncClient: The constructed client. + """ + return NotebookServiceClient.from_service_account_info.__func__(NotebookServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + NotebookServiceAsyncClient: The constructed client. + """ + return NotebookServiceClient.from_service_account_file.__func__(NotebookServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. 
+ + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return NotebookServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> NotebookServiceTransport: + """Returns the transport used by the client instance. + + Returns: + NotebookServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. 
+ + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + + get_transport_class = functools.partial( + type(NotebookServiceClient).get_transport_class, type(NotebookServiceClient) + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, NotebookServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the notebook service async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.NotebookServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. 
If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = NotebookServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_notebook_runtime_template( + self, + request: Optional[ + Union[notebook_service.CreateNotebookRuntimeTemplateRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + notebook_runtime_template: Optional[ + notebook_runtime.NotebookRuntimeTemplate + ] = None, + notebook_runtime_template_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a NotebookRuntimeTemplate. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_create_notebook_runtime_template(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + notebook_runtime_template = aiplatform_v1beta1.NotebookRuntimeTemplate() + notebook_runtime_template.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateNotebookRuntimeTemplateRequest( + parent="parent_value", + notebook_runtime_template=notebook_runtime_template, + ) + + # Make the request + operation = client.create_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.CreateNotebookRuntimeTemplateRequest, dict]]): + The request object. Request message for + [NotebookService.CreateNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.CreateNotebookRuntimeTemplate]. + parent (:class:`str`): + Required. The resource name of the Location to create + the NotebookRuntimeTemplate. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_template (:class:`google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplate`): + Required. The NotebookRuntimeTemplate + to create. + + This corresponds to the ``notebook_runtime_template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_template_id (:class:`str`): + Optional. User specified ID for the + notebook runtime template. 
+ + This corresponds to the ``notebook_runtime_template_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplate` A template that specifies runtime configurations such as machine type, + runtime version, network configurations, etc. + Multiple runtimes can be created from a runtime + template. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, notebook_runtime_template, notebook_runtime_template_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.CreateNotebookRuntimeTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if notebook_runtime_template is not None: + request.notebook_runtime_template = notebook_runtime_template + if notebook_runtime_template_id is not None: + request.notebook_runtime_template_id = notebook_runtime_template_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_notebook_runtime_template, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + notebook_runtime.NotebookRuntimeTemplate, + metadata_type=notebook_service.CreateNotebookRuntimeTemplateOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_notebook_runtime_template( + self, + request: Optional[ + Union[notebook_service.GetNotebookRuntimeTemplateRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_runtime.NotebookRuntimeTemplate: + r"""Gets a NotebookRuntimeTemplate. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_get_notebook_runtime_template(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + response = await client.get_notebook_runtime_template(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.GetNotebookRuntimeTemplateRequest, dict]]): + The request object. Request message for + [NotebookService.GetNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.GetNotebookRuntimeTemplate] + name (:class:`str`): + Required. The name of the NotebookRuntimeTemplate + resource. Format: + ``projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplate: + A template that specifies runtime + configurations such as machine type, + runtime version, network configurations, + etc. Multiple runtimes can be created + from a runtime template. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.GetNotebookRuntimeTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_notebook_runtime_template, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_notebook_runtime_templates( + self, + request: Optional[ + Union[notebook_service.ListNotebookRuntimeTemplatesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotebookRuntimeTemplatesAsyncPager: + r"""Lists NotebookRuntimeTemplates in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_list_notebook_runtime_templates(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListNotebookRuntimeTemplatesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtime_templates(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimeTemplatesRequest, dict]]): + The request object. Request message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimeTemplates]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the NotebookRuntimeTemplates. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.notebook_service.pagers.ListNotebookRuntimeTemplatesAsyncPager: + Response message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimeTemplates]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.ListNotebookRuntimeTemplatesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_notebook_runtime_templates, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListNotebookRuntimeTemplatesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_notebook_runtime_template( + self, + request: Optional[ + Union[notebook_service.DeleteNotebookRuntimeTemplateRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a NotebookRuntimeTemplate. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_notebook_runtime_template(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.DeleteNotebookRuntimeTemplateRequest, dict]]): + The request object. Request message for + [NotebookService.DeleteNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.DeleteNotebookRuntimeTemplate]. + name (:class:`str`): + Required. The name of the NotebookRuntimeTemplate + resource to be deleted. Format: + ``projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.DeleteNotebookRuntimeTemplateRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_notebook_runtime_template, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def assign_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.AssignNotebookRuntimeRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + notebook_runtime_template: Optional[str] = None, + notebook_runtime: Optional[gca_notebook_runtime.NotebookRuntime] = None, + notebook_runtime_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Assigns a NotebookRuntime to a user for a particular + Notebook file. This method will either returns an + existing assignment or generates a new one. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_assign_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + notebook_runtime = aiplatform_v1beta1.NotebookRuntime() + notebook_runtime.runtime_user = "runtime_user_value" + notebook_runtime.display_name = "display_name_value" + + request = aiplatform_v1beta1.AssignNotebookRuntimeRequest( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=notebook_runtime, + ) + + # Make the request + operation = client.assign_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request 
(Optional[Union[google.cloud.aiplatform_v1beta1.types.AssignNotebookRuntimeRequest, dict]]): + The request object. Request message for + [NotebookService.AssignNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.AssignNotebookRuntime]. + parent (:class:`str`): + Required. The resource name of the Location to get the + NotebookRuntime assignment. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_template (:class:`str`): + Required. The resource name of the + NotebookRuntimeTemplate based on which a + NotebookRuntime will be assigned (reuse + or create a new one). + + This corresponds to the ``notebook_runtime_template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime (:class:`google.cloud.aiplatform_v1beta1.types.NotebookRuntime`): + Required. Provide runtime specific + information (e.g. runtime owner, + notebook id) used for NotebookRuntime + assignment. + + This corresponds to the ``notebook_runtime`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_id (:class:`str`): + Optional. User specified ID for the + notebook runtime. + + This corresponds to the ``notebook_runtime_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.NotebookRuntime` A runtime is a virtual machine allocated to a particular user for a + particular Notebook file on temporary basis with + lifetime limited to 24 hours. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, notebook_runtime_template, notebook_runtime, notebook_runtime_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.AssignNotebookRuntimeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if notebook_runtime_template is not None: + request.notebook_runtime_template = notebook_runtime_template + if notebook_runtime is not None: + request.notebook_runtime = notebook_runtime + if notebook_runtime_id is not None: + request.notebook_runtime_id = notebook_runtime_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.assign_notebook_runtime, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gca_notebook_runtime.NotebookRuntime, + metadata_type=notebook_service.AssignNotebookRuntimeOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.GetNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_runtime.NotebookRuntime: + r"""Gets a NotebookRuntime. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_get_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + response = await client.get_notebook_runtime(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.GetNotebookRuntimeRequest, dict]]): + The request object. Request message for + [NotebookService.GetNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.GetNotebookRuntime] + name (:class:`str`): + Required. The name of the + NotebookRuntime resource. 
Instead of + checking whether the name is in valid + NotebookRuntime resource name format, + directly throw NotFound exception if + there is no such NotebookRuntime in + spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.NotebookRuntime: + A runtime is a virtual machine + allocated to a particular user for a + particular Notebook file on temporary + basis with lifetime limited to 24 hours. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.GetNotebookRuntimeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_notebook_runtime, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_notebook_runtimes( + self, + request: Optional[ + Union[notebook_service.ListNotebookRuntimesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotebookRuntimesAsyncPager: + r"""Lists NotebookRuntimes in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_list_notebook_runtimes(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListNotebookRuntimesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtimes(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimesRequest, dict]]): + The request object. Request message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimes]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the NotebookRuntimes. 
Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.notebook_service.pagers.ListNotebookRuntimesAsyncPager: + Response message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.ListNotebookRuntimesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_notebook_runtimes, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListNotebookRuntimesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.DeleteNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a NotebookRuntime. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.DeleteNotebookRuntimeRequest, dict]]): + The request object. Request message for + [NotebookService.DeleteNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.DeleteNotebookRuntime]. 
+ name (:class:`str`): + Required. The name of the + NotebookRuntime resource to be deleted. + Instead of checking whether the name is + in valid NotebookRuntime resource name + format, directly throw NotFound + exception if there is no such + NotebookRuntime in spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.DeleteNotebookRuntimeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_notebook_runtime, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def upgrade_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.UpgradeNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Upgrades a NotebookRuntime. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_upgrade_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpgradeNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.upgrade_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.UpgradeNotebookRuntimeRequest, dict]]): + The request object. Request message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.UpgradeNotebookRuntime]. + name (:class:`str`): + Required. The name of the + NotebookRuntime resource to be upgrade. + Instead of checking whether the name is + in valid NotebookRuntime resource name + format, directly throw NotFound + exception if there is no such + NotebookRuntime in spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.UpgradeNotebookRuntimeResponse` Response message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.UpgradeNotebookRuntime]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.UpgradeNotebookRuntimeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.upgrade_notebook_runtime, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + notebook_service.UpgradeNotebookRuntimeResponse, + metadata_type=notebook_service.UpgradeNotebookRuntimeOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def start_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.StartNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Starts a NotebookRuntime. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_start_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.StartNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.start_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.StartNotebookRuntimeRequest, dict]]): + The request object. Request message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.StartNotebookRuntime]. + name (:class:`str`): + Required. The name of the + NotebookRuntime resource to be started. + Instead of checking whether the name is + in valid NotebookRuntime resource name + format, directly throw NotFound + exception if there is no such + NotebookRuntime in spanner. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.StartNotebookRuntimeResponse` Response message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.StartNotebookRuntime]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = notebook_service.StartNotebookRuntimeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.start_notebook_runtime, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + notebook_service.StartNotebookRuntimeResponse, + metadata_type=notebook_service.StartNotebookRuntimeOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+
+                **JSON Example**
+
+                ::
+
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time <
+                            timestamp('2020-10-01T00:00:00.000Z')",
+                          }
+                        }
+                      ]
+                    }
+
+                **YAML Example**
+
+                ::
+
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+                For a description of IAM and its features, see the `IAM
+                developer's
+                guide <https://cloud.google.com/iam/docs>`__.
+        """
+        # Create or coerce a protobuf request object.
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.SetIamPolicyRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.set_iam_policy,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+        )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+
+                **JSON Example**
+
+                ::
+
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": ["user:eve@example.com"],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time <
+                            timestamp('2020-10-01T00:00:00.000Z')",
+                          }
+                        }
+                      ]
+                    }
+
+                **YAML Example**
+
+                ::
+
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+
+                For a description of IAM and its features, see the `IAM
+                developer's
+                guide <https://cloud.google.com/iam/docs>`__.
+        """
+        # Create or coerce a protobuf request object.
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.GetIamPolicyRequest(**request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_iam_policy,
+            default_timeout=None,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)),
+        )
+
+        # Validate the universe domain.
+        self._client._validate_universe_domain()
+
+        # Send the request.
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def __aenter__(self) -> "NotebookServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("NotebookServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/client.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/client.py new file mode 100644 index 0000000000..a0ff87a2f0 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/client.py @@ -0,0 +1,2803 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.notebook_service import pagers +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import network_spec +from google.cloud.aiplatform_v1beta1.types import notebook_euc_config +from google.cloud.aiplatform_v1beta1.types import notebook_idle_shutdown_config +from google.cloud.aiplatform_v1beta1.types import notebook_runtime +from google.cloud.aiplatform_v1beta1.types import ( + notebook_runtime as gca_notebook_runtime, +) +from google.cloud.aiplatform_v1beta1.types import notebook_runtime_template_ref +from google.cloud.aiplatform_v1beta1.types import notebook_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 # type: ignore +from 
google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import NotebookServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import NotebookServiceGrpcTransport +from .transports.grpc_asyncio import NotebookServiceGrpcAsyncIOTransport +from .transports.rest import NotebookServiceRestTransport + + +class NotebookServiceClientMeta(type): + """Metaclass for the NotebookService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[NotebookServiceTransport]] + _transport_registry["grpc"] = NotebookServiceGrpcTransport + _transport_registry["grpc_asyncio"] = NotebookServiceGrpcAsyncIOTransport + _transport_registry["rest"] = NotebookServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[NotebookServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class NotebookServiceClient(metaclass=NotebookServiceClientMeta): + """The interface for Vertex Notebook service (a.k.a. Colab on + Workbench). + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. 
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead.
+    DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}"
+    _DEFAULT_UNIVERSE = "googleapis.com"
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            NotebookServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            NotebookServiceClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> NotebookServiceTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            NotebookServiceTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def network_path(
+        project: str,
+        network: str,
+    ) -> str:
+        """Returns a fully-qualified network string."""
+        return "projects/{project}/global/networks/{network}".format(
+            project=project,
+            network=network,
+        )
+
+    @staticmethod
+    def parse_network_path(path: str) -> Dict[str, str]:
+        """Parses a network path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/global/networks/(?P<network>.+?)$", path
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def notebook_runtime_path(
+        project: str,
+        location: str,
+        notebook_runtime: str,
+    ) -> str:
+        """Returns a fully-qualified notebook_runtime string."""
+        return "projects/{project}/locations/{location}/notebookRuntimes/{notebook_runtime}".format(
+            project=project,
+            location=location,
+            notebook_runtime=notebook_runtime,
+        )
+
+    @staticmethod
+    def parse_notebook_runtime_path(path: str) -> Dict[str, str]:
+        """Parses a notebook_runtime path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/notebookRuntimes/(?P<notebook_runtime>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def notebook_runtime_template_path(
+        project: str,
+        location: str,
+        notebook_runtime_template: str,
+    ) -> str:
+        """Returns a fully-qualified notebook_runtime_template string."""
+        return "projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}".format(
+            project=project,
+            location=location,
+            notebook_runtime_template=notebook_runtime_template,
+        )
+
+    @staticmethod
+    def parse_notebook_runtime_template_path(path: str) -> Dict[str, str]:
+        """Parses a notebook_runtime_template path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/notebookRuntimeTemplates/(?P<notebook_runtime_template>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def subnetwork_path(
+        project: str,
+        region: str,
+        subnetwork: str,
+    ) -> str:
+        """Returns a fully-qualified subnetwork string."""
+        return "projects/{project}/regions/{region}/subnetworks/{subnetwork}".format(
+            project=project,
+            region=region,
+            subnetwork=subnetwork,
+        )
+
+    @staticmethod
+    def parse_subnetwork_path(path: str) -> Dict[str, str]:
+        """Parses a subnetwork path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/regions/(?P<region>.+?)/subnetworks/(?P<subnetwork>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[client_options_lib.ClientOptions] = None
+    ):
+        """Deprecated. Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. 
+ if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. + """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. 
+ """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = NotebookServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = NotebookServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = NotebookServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. 
+ + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = NotebookServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. + + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = NotebookServiceClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. 
+ """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or NotebookServiceClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, NotebookServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the notebook service client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, NotebookServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. 
Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = NotebookServiceClient._read_environment_variables() + self._client_cert_source = NotebookServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = NotebookServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, NotebookServiceTransport) + if transport_provided: + # transport is a NotebookServiceTransport instance. + if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = cast(NotebookServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or NotebookServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(cast(str, transport)) + self._transport = Transport( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + def create_notebook_runtime_template( + self, + request: Optional[ + Union[notebook_service.CreateNotebookRuntimeTemplateRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + notebook_runtime_template: Optional[ + notebook_runtime.NotebookRuntimeTemplate + ] = None, + notebook_runtime_template_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a NotebookRuntimeTemplate. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_create_notebook_runtime_template(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + notebook_runtime_template = aiplatform_v1beta1.NotebookRuntimeTemplate() + notebook_runtime_template.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateNotebookRuntimeTemplateRequest( + parent="parent_value", + notebook_runtime_template=notebook_runtime_template, + ) + + # Make the request + operation = client.create_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateNotebookRuntimeTemplateRequest, dict]): + The request object. Request message for + [NotebookService.CreateNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.CreateNotebookRuntimeTemplate]. + parent (str): + Required. The resource name of the Location to create + the NotebookRuntimeTemplate. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_template (google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplate): + Required. The NotebookRuntimeTemplate + to create. + + This corresponds to the ``notebook_runtime_template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_template_id (str): + Optional. User specified ID for the + notebook runtime template. 
+ + This corresponds to the ``notebook_runtime_template_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplate` A template that specifies runtime configurations such as machine type, + runtime version, network configurations, etc. + Multiple runtimes can be created from a runtime + template. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, notebook_runtime_template, notebook_runtime_template_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.CreateNotebookRuntimeTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notebook_service.CreateNotebookRuntimeTemplateRequest + ): + request = notebook_service.CreateNotebookRuntimeTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if notebook_runtime_template is not None: + request.notebook_runtime_template = notebook_runtime_template + if notebook_runtime_template_id is not None: + request.notebook_runtime_template_id = notebook_runtime_template_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.create_notebook_runtime_template + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + notebook_runtime.NotebookRuntimeTemplate, + metadata_type=notebook_service.CreateNotebookRuntimeTemplateOperationMetadata, + ) + + # Done; return the response. + return response + + def get_notebook_runtime_template( + self, + request: Optional[ + Union[notebook_service.GetNotebookRuntimeTemplateRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_runtime.NotebookRuntimeTemplate: + r"""Gets a NotebookRuntimeTemplate. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_get_notebook_runtime_template(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + response = client.get_notebook_runtime_template(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetNotebookRuntimeTemplateRequest, dict]): + The request object. Request message for + [NotebookService.GetNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.GetNotebookRuntimeTemplate] + name (str): + Required. The name of the NotebookRuntimeTemplate + resource. Format: + ``projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplate: + A template that specifies runtime + configurations such as machine type, + runtime version, network configurations, + etc. Multiple runtimes can be created + from a runtime template. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.GetNotebookRuntimeTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.GetNotebookRuntimeTemplateRequest): + request = notebook_service.GetNotebookRuntimeTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.get_notebook_runtime_template + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_notebook_runtime_templates( + self, + request: Optional[ + Union[notebook_service.ListNotebookRuntimeTemplatesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotebookRuntimeTemplatesPager: + r"""Lists NotebookRuntimeTemplates in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_list_notebook_runtime_templates(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListNotebookRuntimeTemplatesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtime_templates(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimeTemplatesRequest, dict]): + The request object. Request message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimeTemplates]. + parent (str): + Required. The resource name of the Location from which + to list the NotebookRuntimeTemplates. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.notebook_service.pagers.ListNotebookRuntimeTemplatesPager: + Response message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimeTemplates]. + + Iterating over this object will yield results and + resolve additional pages automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.ListNotebookRuntimeTemplatesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notebook_service.ListNotebookRuntimeTemplatesRequest + ): + request = notebook_service.ListNotebookRuntimeTemplatesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.list_notebook_runtime_templates + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListNotebookRuntimeTemplatesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_notebook_runtime_template( + self, + request: Optional[ + Union[notebook_service.DeleteNotebookRuntimeTemplateRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a NotebookRuntimeTemplate. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_delete_notebook_runtime_template(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteNotebookRuntimeTemplateRequest, dict]): + The request object. Request message for + [NotebookService.DeleteNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.DeleteNotebookRuntimeTemplate]. + name (str): + Required. The name of the NotebookRuntimeTemplate + resource to be deleted. Format: + ``projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.DeleteNotebookRuntimeTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, notebook_service.DeleteNotebookRuntimeTemplateRequest + ): + request = notebook_service.DeleteNotebookRuntimeTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[ + self._transport.delete_notebook_runtime_template + ] + + # Certain fields should be provided within the metadata header; + # add these here. 
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+        )
+
+        # Validate the universe domain.
+        self._validate_universe_domain()
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def assign_notebook_runtime(
+        self,
+        request: Optional[
+            Union[notebook_service.AssignNotebookRuntimeRequest, dict]
+        ] = None,
+        *,
+        parent: Optional[str] = None,
+        notebook_runtime_template: Optional[str] = None,
+        notebook_runtime: Optional[gca_notebook_runtime.NotebookRuntime] = None,
+        notebook_runtime_id: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> gac_operation.Operation:
+        r"""Assigns a NotebookRuntime to a user for a particular
+        Notebook file. This method will either return an
+        existing assignment or generate a new one.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_assign_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + notebook_runtime = aiplatform_v1beta1.NotebookRuntime() + notebook_runtime.runtime_user = "runtime_user_value" + notebook_runtime.display_name = "display_name_value" + + request = aiplatform_v1beta1.AssignNotebookRuntimeRequest( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=notebook_runtime, + ) + + # Make the request + operation = client.assign_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.AssignNotebookRuntimeRequest, dict]): + The request object. Request message for + [NotebookService.AssignNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.AssignNotebookRuntime]. + parent (str): + Required. The resource name of the Location to get the + NotebookRuntime assignment. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_template (str): + Required. The resource name of the + NotebookRuntimeTemplate based on which a + NotebookRuntime will be assigned (reuse + or create a new one). + + This corresponds to the ``notebook_runtime_template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime (google.cloud.aiplatform_v1beta1.types.NotebookRuntime): + Required. Provide runtime specific + information (e.g. 
runtime owner, + notebook id) used for NotebookRuntime + assignment. + + This corresponds to the ``notebook_runtime`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notebook_runtime_id (str): + Optional. User specified ID for the + notebook runtime. + + This corresponds to the ``notebook_runtime_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.NotebookRuntime` A runtime is a virtual machine allocated to a particular user for a + particular Notebook file on temporary basis with + lifetime limited to 24 hours. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any( + [parent, notebook_runtime_template, notebook_runtime, notebook_runtime_id] + ) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.AssignNotebookRuntimeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.AssignNotebookRuntimeRequest): + request = notebook_service.AssignNotebookRuntimeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if notebook_runtime_template is not None: + request.notebook_runtime_template = notebook_runtime_template + if notebook_runtime is not None: + request.notebook_runtime = notebook_runtime + if notebook_runtime_id is not None: + request.notebook_runtime_id = notebook_runtime_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.assign_notebook_runtime] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + gca_notebook_runtime.NotebookRuntime, + metadata_type=notebook_service.AssignNotebookRuntimeOperationMetadata, + ) + + # Done; return the response. + return response + + def get_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.GetNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_runtime.NotebookRuntime: + r"""Gets a NotebookRuntime. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_get_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + response = client.get_notebook_runtime(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetNotebookRuntimeRequest, dict]): + The request object. Request message for + [NotebookService.GetNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.GetNotebookRuntime] + name (str): + Required. The name of the + NotebookRuntime resource. Instead of + checking whether the name is in valid + NotebookRuntime resource name format, + directly throw NotFound exception if + there is no such NotebookRuntime in + spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.NotebookRuntime: + A runtime is a virtual machine + allocated to a particular user for a + particular Notebook file on temporary + basis with lifetime limited to 24 hours. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.GetNotebookRuntimeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.GetNotebookRuntimeRequest): + request = notebook_service.GetNotebookRuntimeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_notebook_runtime] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_notebook_runtimes( + self, + request: Optional[ + Union[notebook_service.ListNotebookRuntimesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNotebookRuntimesPager: + r"""Lists NotebookRuntimes in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_list_notebook_runtimes(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListNotebookRuntimesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtimes(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimesRequest, dict]): + The request object. Request message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimes]. + parent (str): + Required. The resource name of the Location from which + to list the NotebookRuntimes. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.notebook_service.pagers.ListNotebookRuntimesPager: + Response message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimes]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.ListNotebookRuntimesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.ListNotebookRuntimesRequest): + request = notebook_service.ListNotebookRuntimesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_notebook_runtimes] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListNotebookRuntimesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.DeleteNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a NotebookRuntime. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_delete_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteNotebookRuntimeRequest, dict]): + The request object. Request message for + [NotebookService.DeleteNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.DeleteNotebookRuntime]. + name (str): + Required. The name of the + NotebookRuntime resource to be deleted. + Instead of checking whether the name is + in valid NotebookRuntime resource name + format, directly throw NotFound + exception if there is no such + NotebookRuntime in spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.DeleteNotebookRuntimeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.DeleteNotebookRuntimeRequest): + request = notebook_service.DeleteNotebookRuntimeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_notebook_runtime] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+        response = gac_operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            empty_pb2.Empty,
+            metadata_type=gca_operation.DeleteOperationMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def upgrade_notebook_runtime(
+        self,
+        request: Optional[
+            Union[notebook_service.UpgradeNotebookRuntimeRequest, dict]
+        ] = None,
+        *,
+        name: Optional[str] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, str]] = (),
+    ) -> gac_operation.Operation:
+        r"""Upgrades a NotebookRuntime.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            # client as shown in:
+            # https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import aiplatform_v1beta1
+
+            def sample_upgrade_notebook_runtime():
+                # Create a client
+                client = aiplatform_v1beta1.NotebookServiceClient()
+
+                # Initialize request argument(s)
+                request = aiplatform_v1beta1.UpgradeNotebookRuntimeRequest(
+                    name="name_value",
+                )
+
+                # Make the request
+                operation = client.upgrade_notebook_runtime(request=request)
+
+                print("Waiting for operation to complete...")
+
+                response = operation.result()
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Union[google.cloud.aiplatform_v1beta1.types.UpgradeNotebookRuntimeRequest, dict]):
+                The request object. Request message for
+                [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.UpgradeNotebookRuntime].
+            name (str):
+                Required. The name of the
+                NotebookRuntime resource to be upgraded.
+ Instead of checking whether the name is + in valid NotebookRuntime resource name + format, directly throw NotFound + exception if there is no such + NotebookRuntime in spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.UpgradeNotebookRuntimeResponse` Response message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.UpgradeNotebookRuntime]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.UpgradeNotebookRuntimeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.UpgradeNotebookRuntimeRequest): + request = notebook_service.UpgradeNotebookRuntimeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.upgrade_notebook_runtime] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + notebook_service.UpgradeNotebookRuntimeResponse, + metadata_type=notebook_service.UpgradeNotebookRuntimeOperationMetadata, + ) + + # Done; return the response. + return response + + def start_notebook_runtime( + self, + request: Optional[ + Union[notebook_service.StartNotebookRuntimeRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Starts a NotebookRuntime. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_start_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.StartNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.start_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.StartNotebookRuntimeRequest, dict]): + The request object. Request message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.StartNotebookRuntime]. + name (str): + Required. The name of the + NotebookRuntime resource to be started. + Instead of checking whether the name is + in valid NotebookRuntime resource name + format, directly throw NotFound + exception if there is no such + NotebookRuntime in spanner. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.StartNotebookRuntimeResponse` Response message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.StartNotebookRuntime]. + + """ + # Create or coerce a protobuf request object. 
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a notebook_service.StartNotebookRuntimeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, notebook_service.StartNotebookRuntimeRequest): + request = notebook_service.StartNotebookRuntimeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.start_notebook_runtime] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + notebook_service.StartNotebookRuntimeResponse, + metadata_type=notebook_service.StartNotebookRuntimeOperationMetadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "NotebookServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! 
Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("NotebookServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/pagers.py new file mode 100644 index 0000000000..f6e467a90f --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/pagers.py @@ -0,0 +1,290 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, +) + +from google.cloud.aiplatform_v1beta1.types import notebook_runtime +from google.cloud.aiplatform_v1beta1.types import notebook_service + + +class ListNotebookRuntimeTemplatesPager: + """A pager for iterating through ``list_notebook_runtime_templates`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimeTemplatesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``notebook_runtime_templates`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListNotebookRuntimeTemplates`` requests and continue to iterate + through the ``notebook_runtime_templates`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimeTemplatesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., notebook_service.ListNotebookRuntimeTemplatesResponse], + request: notebook_service.ListNotebookRuntimeTemplatesRequest, + response: notebook_service.ListNotebookRuntimeTemplatesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimeTemplatesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimeTemplatesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = notebook_service.ListNotebookRuntimeTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[notebook_service.ListNotebookRuntimeTemplatesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[notebook_runtime.NotebookRuntimeTemplate]: + for page in self.pages: + yield from page.notebook_runtime_templates + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListNotebookRuntimeTemplatesAsyncPager: + """A pager for iterating through ``list_notebook_runtime_templates`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimeTemplatesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``notebook_runtime_templates`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListNotebookRuntimeTemplates`` requests and continue to iterate + through the ``notebook_runtime_templates`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimeTemplatesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[notebook_service.ListNotebookRuntimeTemplatesResponse] + ], + request: notebook_service.ListNotebookRuntimeTemplatesRequest, + response: notebook_service.ListNotebookRuntimeTemplatesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimeTemplatesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimeTemplatesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = notebook_service.ListNotebookRuntimeTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[notebook_service.ListNotebookRuntimeTemplatesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[notebook_runtime.NotebookRuntimeTemplate]: + async def async_generator(): + async for page in self.pages: + for response in page.notebook_runtime_templates: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListNotebookRuntimesPager: + """A pager for iterating through ``list_notebook_runtimes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``notebook_runtimes`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListNotebookRuntimes`` requests and continue to iterate + through the ``notebook_runtimes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., notebook_service.ListNotebookRuntimesResponse], + request: notebook_service.ListNotebookRuntimesRequest, + response: notebook_service.ListNotebookRuntimesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = notebook_service.ListNotebookRuntimesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[notebook_service.ListNotebookRuntimesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[notebook_runtime.NotebookRuntime]: + for page in self.pages: + yield from page.notebook_runtimes + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListNotebookRuntimesAsyncPager: + """A pager for iterating through ``list_notebook_runtimes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``notebook_runtimes`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListNotebookRuntimes`` requests and continue to iterate + through the ``notebook_runtimes`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., Awaitable[notebook_service.ListNotebookRuntimesResponse]], + request: notebook_service.ListNotebookRuntimesRequest, + response: notebook_service.ListNotebookRuntimesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = notebook_service.ListNotebookRuntimesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[notebook_service.ListNotebookRuntimesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[notebook_runtime.NotebookRuntime]: + async def async_generator(): + async for page in self.pages: + for response in page.notebook_runtimes: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/__init__.py new file mode 100644 index 0000000000..eedf575a7f --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import NotebookServiceTransport +from .grpc import NotebookServiceGrpcTransport +from .grpc_asyncio import NotebookServiceGrpcAsyncIOTransport +from .rest import NotebookServiceRestTransport +from .rest import NotebookServiceRestInterceptor + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[NotebookServiceTransport]] +_transport_registry["grpc"] = NotebookServiceGrpcTransport +_transport_registry["grpc_asyncio"] = NotebookServiceGrpcAsyncIOTransport +_transport_registry["rest"] = NotebookServiceRestTransport + +__all__ = ( + "NotebookServiceTransport", + "NotebookServiceGrpcTransport", + "NotebookServiceGrpcAsyncIOTransport", + "NotebookServiceRestTransport", + "NotebookServiceRestInterceptor", +) diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/base.py new file mode 100644 index 0000000000..6772732ffa --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/base.py @@ -0,0 +1,402 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import notebook_runtime +from google.cloud.aiplatform_v1beta1.types import notebook_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class NotebookServiceTransport(abc.ABC): + """Abstract transport class for NotebookService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_notebook_runtime_template: gapic_v1.method.wrap_method( + self.create_notebook_runtime_template, + default_timeout=None, + client_info=client_info, + ), + self.get_notebook_runtime_template: gapic_v1.method.wrap_method( + self.get_notebook_runtime_template, + default_timeout=None, + client_info=client_info, + ), + self.list_notebook_runtime_templates: gapic_v1.method.wrap_method( + self.list_notebook_runtime_templates, + default_timeout=None, + client_info=client_info, + ), + self.delete_notebook_runtime_template: gapic_v1.method.wrap_method( + self.delete_notebook_runtime_template, + default_timeout=None, + client_info=client_info, + ), + self.assign_notebook_runtime: gapic_v1.method.wrap_method( + self.assign_notebook_runtime, + default_timeout=None, + client_info=client_info, + ), + self.get_notebook_runtime: gapic_v1.method.wrap_method( + self.get_notebook_runtime, + default_timeout=None, + client_info=client_info, + ), + self.list_notebook_runtimes: gapic_v1.method.wrap_method( + self.list_notebook_runtimes, + default_timeout=None, + client_info=client_info, + ), + self.delete_notebook_runtime: gapic_v1.method.wrap_method( + self.delete_notebook_runtime, + default_timeout=None, + client_info=client_info, + ), + self.upgrade_notebook_runtime: gapic_v1.method.wrap_method( + self.upgrade_notebook_runtime, + default_timeout=None, + client_info=client_info, + ), + 
self.start_notebook_runtime: gapic_v1.method.wrap_method( + self.start_notebook_runtime, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.CreateNotebookRuntimeTemplateRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeTemplateRequest], + Union[ + notebook_runtime.NotebookRuntimeTemplate, + Awaitable[notebook_runtime.NotebookRuntimeTemplate], + ], + ]: + raise NotImplementedError() + + @property + def list_notebook_runtime_templates( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimeTemplatesRequest], + Union[ + notebook_service.ListNotebookRuntimeTemplatesResponse, + Awaitable[notebook_service.ListNotebookRuntimeTemplatesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeTemplateRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def assign_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.AssignNotebookRuntimeRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeRequest], + Union[ + 
notebook_runtime.NotebookRuntime, + Awaitable[notebook_runtime.NotebookRuntime], + ], + ]: + raise NotImplementedError() + + @property + def list_notebook_runtimes( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimesRequest], + Union[ + notebook_service.ListNotebookRuntimesResponse, + Awaitable[notebook_service.ListNotebookRuntimesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def upgrade_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.UpgradeNotebookRuntimeRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def start_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.StartNotebookRuntimeRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + 
raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("NotebookServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc.py new file mode 100644 index 0000000000..097e8b9565 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc.py @@ -0,0 +1,763 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import notebook_runtime +from google.cloud.aiplatform_v1beta1.types import notebook_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import NotebookServiceTransport, DEFAULT_CLIENT_INFO + + +class NotebookServiceGrpcTransport(NotebookServiceTransport): + """gRPC backend transport for NotebookService. + + The interface for Vertex Notebook service (a.k.a. Colab on + Workbench). + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. 
+ self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.CreateNotebookRuntimeTemplateRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the create notebook runtime + template method over gRPC. + + Creates a NotebookRuntimeTemplate. + + Returns: + Callable[[~.CreateNotebookRuntimeTemplateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_notebook_runtime_template" not in self._stubs: + self._stubs[ + "create_notebook_runtime_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/CreateNotebookRuntimeTemplate", + request_serializer=notebook_service.CreateNotebookRuntimeTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_notebook_runtime_template"] + + @property + def get_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeTemplateRequest], + notebook_runtime.NotebookRuntimeTemplate, + ]: + r"""Return a callable for the get notebook runtime template method over gRPC. + + Gets a NotebookRuntimeTemplate. + + Returns: + Callable[[~.GetNotebookRuntimeTemplateRequest], + ~.NotebookRuntimeTemplate]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_notebook_runtime_template" not in self._stubs: + self._stubs[ + "get_notebook_runtime_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/GetNotebookRuntimeTemplate", + request_serializer=notebook_service.GetNotebookRuntimeTemplateRequest.serialize, + response_deserializer=notebook_runtime.NotebookRuntimeTemplate.deserialize, + ) + return self._stubs["get_notebook_runtime_template"] + + @property + def list_notebook_runtime_templates( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimeTemplatesRequest], + notebook_service.ListNotebookRuntimeTemplatesResponse, + ]: + r"""Return a callable for the list notebook runtime + templates method over gRPC. + + Lists NotebookRuntimeTemplates in a Location. 
+ + Returns: + Callable[[~.ListNotebookRuntimeTemplatesRequest], + ~.ListNotebookRuntimeTemplatesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_notebook_runtime_templates" not in self._stubs: + self._stubs[ + "list_notebook_runtime_templates" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/ListNotebookRuntimeTemplates", + request_serializer=notebook_service.ListNotebookRuntimeTemplatesRequest.serialize, + response_deserializer=notebook_service.ListNotebookRuntimeTemplatesResponse.deserialize, + ) + return self._stubs["list_notebook_runtime_templates"] + + @property + def delete_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeTemplateRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the delete notebook runtime + template method over gRPC. + + Deletes a NotebookRuntimeTemplate. + + Returns: + Callable[[~.DeleteNotebookRuntimeTemplateRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_notebook_runtime_template" not in self._stubs: + self._stubs[ + "delete_notebook_runtime_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/DeleteNotebookRuntimeTemplate", + request_serializer=notebook_service.DeleteNotebookRuntimeTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_notebook_runtime_template"] + + @property + def assign_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.AssignNotebookRuntimeRequest], operations_pb2.Operation + ]: + r"""Return a callable for the assign notebook runtime method over gRPC. + + Assigns a NotebookRuntime to a user for a particular + Notebook file. This method will either returns an + existing assignment or generates a new one. + + Returns: + Callable[[~.AssignNotebookRuntimeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "assign_notebook_runtime" not in self._stubs: + self._stubs["assign_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/AssignNotebookRuntime", + request_serializer=notebook_service.AssignNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["assign_notebook_runtime"] + + @property + def get_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeRequest], notebook_runtime.NotebookRuntime + ]: + r"""Return a callable for the get notebook runtime method over gRPC. + + Gets a NotebookRuntime. + + Returns: + Callable[[~.GetNotebookRuntimeRequest], + ~.NotebookRuntime]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_notebook_runtime" not in self._stubs: + self._stubs["get_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/GetNotebookRuntime", + request_serializer=notebook_service.GetNotebookRuntimeRequest.serialize, + response_deserializer=notebook_runtime.NotebookRuntime.deserialize, + ) + return self._stubs["get_notebook_runtime"] + + @property + def list_notebook_runtimes( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimesRequest], + notebook_service.ListNotebookRuntimesResponse, + ]: + r"""Return a callable for the list notebook runtimes method over gRPC. + + Lists NotebookRuntimes in a Location. + + Returns: + Callable[[~.ListNotebookRuntimesRequest], + ~.ListNotebookRuntimesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_notebook_runtimes" not in self._stubs: + self._stubs["list_notebook_runtimes"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/ListNotebookRuntimes", + request_serializer=notebook_service.ListNotebookRuntimesRequest.serialize, + response_deserializer=notebook_service.ListNotebookRuntimesResponse.deserialize, + ) + return self._stubs["list_notebook_runtimes"] + + @property + def delete_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete notebook runtime method over gRPC. + + Deletes a NotebookRuntime. 
+ + Returns: + Callable[[~.DeleteNotebookRuntimeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_notebook_runtime" not in self._stubs: + self._stubs["delete_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/DeleteNotebookRuntime", + request_serializer=notebook_service.DeleteNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_notebook_runtime"] + + @property + def upgrade_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.UpgradeNotebookRuntimeRequest], operations_pb2.Operation + ]: + r"""Return a callable for the upgrade notebook runtime method over gRPC. + + Upgrades a NotebookRuntime. + + Returns: + Callable[[~.UpgradeNotebookRuntimeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "upgrade_notebook_runtime" not in self._stubs: + self._stubs["upgrade_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/UpgradeNotebookRuntime", + request_serializer=notebook_service.UpgradeNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["upgrade_notebook_runtime"] + + @property + def start_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.StartNotebookRuntimeRequest], operations_pb2.Operation + ]: + r"""Return a callable for the start notebook runtime method over gRPC. + + Starts a NotebookRuntime. 
+ + Returns: + Callable[[~.StartNotebookRuntimeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "start_notebook_runtime" not in self._stubs: + self._stubs["start_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/StartNotebookRuntime", + request_serializer=notebook_service.StartNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["start_notebook_runtime"] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs:
+ self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+ "/google.cloud.location.Locations/ListLocations",
+ request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+ response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+ )
+ return self._stubs["list_locations"]
+
+ @property
+ def get_location(
+ self,
+ ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+ r"""Return a callable for the get location method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_location" not in self._stubs:
+ self._stubs["get_location"] = self.grpc_channel.unary_unary(
+ "/google.cloud.location.Locations/GetLocation",
+ request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+ response_deserializer=locations_pb2.Location.FromString,
+ )
+ return self._stubs["get_location"]
+
+ @property
+ def set_iam_policy(
+ self,
+ ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+ r"""Return a callable for the set iam policy method over gRPC.
+ Sets the IAM access control policy on the specified
+ function. Replaces any existing policy.
+ Returns:
+ Callable[[~.SetIamPolicyRequest],
+ ~.Policy]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("NotebookServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..1f6ff513a7 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/grpc_asyncio.py @@ -0,0 +1,769 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import notebook_runtime +from google.cloud.aiplatform_v1beta1.types import notebook_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import NotebookServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import NotebookServiceGrpcTransport + + +class NotebookServiceGrpcAsyncIOTransport(NotebookServiceTransport): + """gRPC AsyncIO backend transport for NotebookService. + + The interface for Vertex Notebook service (a.k.a. Colab on + Workbench). + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. 
+ """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.CreateNotebookRuntimeTemplateRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create notebook runtime + template method over gRPC. + + Creates a NotebookRuntimeTemplate. + + Returns: + Callable[[~.CreateNotebookRuntimeTemplateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_notebook_runtime_template" not in self._stubs: + self._stubs[ + "create_notebook_runtime_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/CreateNotebookRuntimeTemplate", + request_serializer=notebook_service.CreateNotebookRuntimeTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_notebook_runtime_template"] + + @property + def get_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeTemplateRequest], + Awaitable[notebook_runtime.NotebookRuntimeTemplate], + ]: + r"""Return a callable for the get notebook runtime template method over gRPC. + + Gets a NotebookRuntimeTemplate. + + Returns: + Callable[[~.GetNotebookRuntimeTemplateRequest], + Awaitable[~.NotebookRuntimeTemplate]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_notebook_runtime_template" not in self._stubs: + self._stubs[ + "get_notebook_runtime_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/GetNotebookRuntimeTemplate", + request_serializer=notebook_service.GetNotebookRuntimeTemplateRequest.serialize, + response_deserializer=notebook_runtime.NotebookRuntimeTemplate.deserialize, + ) + return self._stubs["get_notebook_runtime_template"] + + @property + def list_notebook_runtime_templates( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimeTemplatesRequest], + Awaitable[notebook_service.ListNotebookRuntimeTemplatesResponse], + ]: + r"""Return a callable for the list notebook runtime + templates method over gRPC. + + Lists NotebookRuntimeTemplates in a Location. + + Returns: + Callable[[~.ListNotebookRuntimeTemplatesRequest], + Awaitable[~.ListNotebookRuntimeTemplatesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_notebook_runtime_templates" not in self._stubs: + self._stubs[ + "list_notebook_runtime_templates" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/ListNotebookRuntimeTemplates", + request_serializer=notebook_service.ListNotebookRuntimeTemplatesRequest.serialize, + response_deserializer=notebook_service.ListNotebookRuntimeTemplatesResponse.deserialize, + ) + return self._stubs["list_notebook_runtime_templates"] + + @property + def delete_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeTemplateRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete notebook runtime + template method over gRPC. + + Deletes a NotebookRuntimeTemplate. + + Returns: + Callable[[~.DeleteNotebookRuntimeTemplateRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_notebook_runtime_template" not in self._stubs: + self._stubs[ + "delete_notebook_runtime_template" + ] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/DeleteNotebookRuntimeTemplate", + request_serializer=notebook_service.DeleteNotebookRuntimeTemplateRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_notebook_runtime_template"] + + @property + def assign_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.AssignNotebookRuntimeRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the assign notebook runtime method over gRPC. + + Assigns a NotebookRuntime to a user for a particular + Notebook file. This method will either returns an + existing assignment or generates a new one. 
+ + Returns: + Callable[[~.AssignNotebookRuntimeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "assign_notebook_runtime" not in self._stubs: + self._stubs["assign_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/AssignNotebookRuntime", + request_serializer=notebook_service.AssignNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["assign_notebook_runtime"] + + @property + def get_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeRequest], + Awaitable[notebook_runtime.NotebookRuntime], + ]: + r"""Return a callable for the get notebook runtime method over gRPC. + + Gets a NotebookRuntime. + + Returns: + Callable[[~.GetNotebookRuntimeRequest], + Awaitable[~.NotebookRuntime]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_notebook_runtime" not in self._stubs: + self._stubs["get_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/GetNotebookRuntime", + request_serializer=notebook_service.GetNotebookRuntimeRequest.serialize, + response_deserializer=notebook_runtime.NotebookRuntime.deserialize, + ) + return self._stubs["get_notebook_runtime"] + + @property + def list_notebook_runtimes( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimesRequest], + Awaitable[notebook_service.ListNotebookRuntimesResponse], + ]: + r"""Return a callable for the list notebook runtimes method over gRPC. + + Lists NotebookRuntimes in a Location. + + Returns: + Callable[[~.ListNotebookRuntimesRequest], + Awaitable[~.ListNotebookRuntimesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_notebook_runtimes" not in self._stubs: + self._stubs["list_notebook_runtimes"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/ListNotebookRuntimes", + request_serializer=notebook_service.ListNotebookRuntimesRequest.serialize, + response_deserializer=notebook_service.ListNotebookRuntimesResponse.deserialize, + ) + return self._stubs["list_notebook_runtimes"] + + @property + def delete_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete notebook runtime method over gRPC. + + Deletes a NotebookRuntime. + + Returns: + Callable[[~.DeleteNotebookRuntimeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_notebook_runtime" not in self._stubs: + self._stubs["delete_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/DeleteNotebookRuntime", + request_serializer=notebook_service.DeleteNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_notebook_runtime"] + + @property + def upgrade_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.UpgradeNotebookRuntimeRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the upgrade notebook runtime method over gRPC. + + Upgrades a NotebookRuntime. + + Returns: + Callable[[~.UpgradeNotebookRuntimeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "upgrade_notebook_runtime" not in self._stubs: + self._stubs["upgrade_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/UpgradeNotebookRuntime", + request_serializer=notebook_service.UpgradeNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["upgrade_notebook_runtime"] + + @property + def start_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.StartNotebookRuntimeRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the start notebook runtime method over gRPC. + + Starts a NotebookRuntime. + + Returns: + Callable[[~.StartNotebookRuntimeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "start_notebook_runtime" not in self._stubs: + self._stubs["start_notebook_runtime"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.NotebookService/StartNotebookRuntime", + request_serializer=notebook_service.StartNotebookRuntimeRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["start_notebook_runtime"] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs:
+ self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+ "/google.longrunning.Operations/CancelOperation",
+ request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["cancel_operation"]
+
+ @property
+ def wait_operation(
+ self,
+ ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+ r"""Return a callable for the wait_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "wait_operation" not in self._stubs:
+ self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+ "/google.longrunning.Operations/WaitOperation",
+ request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+ response_deserializer=None,
+ )
+ return self._stubs["wait_operation"]
+
+ @property
+ def get_operation(
+ self,
+ ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+ r"""Return a callable for the get_operation method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs:
+ self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+ "/google.cloud.location.Locations/ListLocations",
+ request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+ response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+ )
+ return self._stubs["list_locations"]
+
+ @property
+ def get_location(
+ self,
+ ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+ r"""Return a callable for the get location method over gRPC."""
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_location" not in self._stubs:
+ self._stubs["get_location"] = self.grpc_channel.unary_unary(
+ "/google.cloud.location.Locations/GetLocation",
+ request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+ response_deserializer=locations_pb2.Location.FromString,
+ )
+ return self._stubs["get_location"]
+
+ @property
+ def set_iam_policy(
+ self,
+ ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+ r"""Return a callable for the set iam policy method over gRPC.
+ Sets the IAM access control policy on the specified
+ function. Replaces any existing policy.
+ Returns:
+ Callable[[~.SetIamPolicyRequest],
+ ~.Policy]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("NotebookServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py new file mode 100644 index 0000000000..67640d90d9 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/notebook_service/transports/rest.py @@ -0,0 +1,6314 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import notebook_runtime +from google.cloud.aiplatform_v1beta1.types import notebook_service +from google.longrunning import operations_pb2 # type: ignore + +from .base import ( + NotebookServiceTransport, + DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, +) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class NotebookServiceRestInterceptor: + """Interceptor for NotebookService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. 
+ Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the NotebookServiceRestTransport. + + .. code-block:: python + class MyCustomNotebookServiceInterceptor(NotebookServiceRestInterceptor): + def pre_assign_notebook_runtime(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_assign_notebook_runtime(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_notebook_runtime_template(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_notebook_runtime_template(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_notebook_runtime(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_notebook_runtime(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_notebook_runtime_template(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_notebook_runtime_template(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_notebook_runtime(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_notebook_runtime(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_notebook_runtime_template(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_notebook_runtime_template(self, response): + logging.log(f"Received response: {response}") + return response + + def 
pre_list_notebook_runtimes(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_notebook_runtimes(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_notebook_runtime_templates(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_notebook_runtime_templates(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_start_notebook_runtime(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_start_notebook_runtime(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_upgrade_notebook_runtime(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_upgrade_notebook_runtime(self, response): + logging.log(f"Received response: {response}") + return response + + transport = NotebookServiceRestTransport(interceptor=MyCustomNotebookServiceInterceptor()) + client = NotebookServiceClient(transport=transport) + + + """ + + def pre_assign_notebook_runtime( + self, + request: notebook_service.AssignNotebookRuntimeRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.AssignNotebookRuntimeRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for assign_notebook_runtime + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_assign_notebook_runtime( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for assign_notebook_runtime + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_create_notebook_runtime_template( + self, + request: notebook_service.CreateNotebookRuntimeTemplateRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.CreateNotebookRuntimeTemplateRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for create_notebook_runtime_template + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_create_notebook_runtime_template( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_notebook_runtime_template + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_delete_notebook_runtime( + self, + request: notebook_service.DeleteNotebookRuntimeRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.DeleteNotebookRuntimeRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for delete_notebook_runtime + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_delete_notebook_runtime( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_notebook_runtime + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_delete_notebook_runtime_template( + self, + request: notebook_service.DeleteNotebookRuntimeTemplateRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.DeleteNotebookRuntimeTemplateRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for delete_notebook_runtime_template + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_delete_notebook_runtime_template( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_notebook_runtime_template + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_get_notebook_runtime( + self, + request: notebook_service.GetNotebookRuntimeRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[notebook_service.GetNotebookRuntimeRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_notebook_runtime + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_get_notebook_runtime( + self, response: notebook_runtime.NotebookRuntime + ) -> notebook_runtime.NotebookRuntime: + """Post-rpc interceptor for get_notebook_runtime + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_get_notebook_runtime_template( + self, + request: notebook_service.GetNotebookRuntimeTemplateRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.GetNotebookRuntimeTemplateRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for get_notebook_runtime_template + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_get_notebook_runtime_template( + self, response: notebook_runtime.NotebookRuntimeTemplate + ) -> notebook_runtime.NotebookRuntimeTemplate: + """Post-rpc interceptor for get_notebook_runtime_template + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_list_notebook_runtimes( + self, + request: notebook_service.ListNotebookRuntimesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[notebook_service.ListNotebookRuntimesRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_notebook_runtimes + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_list_notebook_runtimes( + self, response: notebook_service.ListNotebookRuntimesResponse + ) -> notebook_service.ListNotebookRuntimesResponse: + """Post-rpc interceptor for list_notebook_runtimes + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_list_notebook_runtime_templates( + self, + request: notebook_service.ListNotebookRuntimeTemplatesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.ListNotebookRuntimeTemplatesRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for list_notebook_runtime_templates + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_list_notebook_runtime_templates( + self, response: notebook_service.ListNotebookRuntimeTemplatesResponse + ) -> notebook_service.ListNotebookRuntimeTemplatesResponse: + """Post-rpc interceptor for list_notebook_runtime_templates + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_start_notebook_runtime( + self, + request: notebook_service.StartNotebookRuntimeRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[notebook_service.StartNotebookRuntimeRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for start_notebook_runtime + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_start_notebook_runtime( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for start_notebook_runtime + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_upgrade_notebook_runtime( + self, + request: notebook_service.UpgradeNotebookRuntimeRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + notebook_service.UpgradeNotebookRuntimeRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for upgrade_notebook_runtime + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_upgrade_notebook_runtime( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for upgrade_notebook_runtime + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. 
+ """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. 
+ """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. 
+ """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.WaitOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the NotebookService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the NotebookService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class NotebookServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: NotebookServiceRestInterceptor + + +class NotebookServiceRestTransport(NotebookServiceTransport): + """REST backend transport for NotebookService. + + The interface for Vertex Notebook service (a.k.a. Colab on + Workbench). + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via an issue in this + library's source repository. Thank you! 
+ """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[NotebookServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via a GitHub issue in + this library's repository. Thank you! + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host) + if maybe_url_match is None: + raise ValueError( + f"Unexpected hostname structure: {host}" + ) # pragma: NO COVER + + url_match_items = maybe_url_match.groupdict() + + host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host + + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST + ) + self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._interceptor = interceptor or NotebookServiceRestInterceptor() + self._prep_wrapped_messages(client_info) + + @property + def operations_client(self) -> operations_v1.AbstractOperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one. 
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:cancel", + }, 
+ { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + 
"uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + 
"uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", 
+ }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, 
+ { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", + }, + ], + "google.longrunning.Operations.WaitOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + 
"uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + 
"method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1beta1", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) + + # Return the client from cache. 
+ return self._operations_client + + class _AssignNotebookRuntime(NotebookServiceRestStub): + def __hash__(self): + return hash("AssignNotebookRuntime") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.AssignNotebookRuntimeRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the assign notebook runtime method over HTTP. + + Args: + request (~.notebook_service.AssignNotebookRuntimeRequest): + The request object. Request message for + [NotebookService.AssignNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.AssignNotebookRuntime]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{parent=projects/*/locations/*}/notebookRuntimes:assign", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_assign_notebook_runtime( + request, metadata + ) + pb_request = notebook_service.AssignNotebookRuntimeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_assign_notebook_runtime(resp) + return resp + + class _CreateNotebookRuntimeTemplate(NotebookServiceRestStub): + def __hash__(self): + return hash("CreateNotebookRuntimeTemplate") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.CreateNotebookRuntimeTemplateRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the create notebook runtime + template method over HTTP. + + Args: + request (~.notebook_service.CreateNotebookRuntimeTemplateRequest): + The request object. Request message for + [NotebookService.CreateNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.CreateNotebookRuntimeTemplate]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{parent=projects/*/locations/*}/notebookRuntimeTemplates", + "body": "notebook_runtime_template", + }, + ] + request, metadata = self._interceptor.pre_create_notebook_runtime_template( + request, metadata + ) + pb_request = notebook_service.CreateNotebookRuntimeTemplateRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_notebook_runtime_template(resp) + return resp + + class _DeleteNotebookRuntime(NotebookServiceRestStub): + def __hash__(self): + return hash("DeleteNotebookRuntime") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.DeleteNotebookRuntimeRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete notebook runtime method over HTTP. + + Args: + request (~.notebook_service.DeleteNotebookRuntimeRequest): + The request object. Request message for + [NotebookService.DeleteNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.DeleteNotebookRuntime]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_notebook_runtime( + request, metadata + ) + pb_request = notebook_service.DeleteNotebookRuntimeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_notebook_runtime(resp) + return resp + + class _DeleteNotebookRuntimeTemplate(NotebookServiceRestStub): + def __hash__(self): + return hash("DeleteNotebookRuntimeTemplate") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.DeleteNotebookRuntimeTemplateRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete notebook runtime + template method over HTTP. + + Args: + request (~.notebook_service.DeleteNotebookRuntimeTemplateRequest): + The request object. Request message for + [NotebookService.DeleteNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.DeleteNotebookRuntimeTemplate]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_notebook_runtime_template( + request, metadata + ) + pb_request = notebook_service.DeleteNotebookRuntimeTemplateRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_notebook_runtime_template(resp) + return resp + + class _GetNotebookRuntime(NotebookServiceRestStub): + def __hash__(self): + return hash("GetNotebookRuntime") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.GetNotebookRuntimeRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_runtime.NotebookRuntime: + r"""Call the get notebook runtime method over HTTP. + + Args: + request (~.notebook_service.GetNotebookRuntimeRequest): + The request object. Request message for + [NotebookService.GetNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.GetNotebookRuntime] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notebook_runtime.NotebookRuntime: + A runtime is a virtual machine + allocated to a particular user for a + particular Notebook file on temporary + basis with lifetime limited to 24 hours. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}", + }, + ] + request, metadata = self._interceptor.pre_get_notebook_runtime( + request, metadata + ) + pb_request = notebook_service.GetNotebookRuntimeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = notebook_runtime.NotebookRuntime() + pb_resp = notebook_runtime.NotebookRuntime.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_notebook_runtime(resp) + return resp + + class _GetNotebookRuntimeTemplate(NotebookServiceRestStub): + def __hash__(self): + return hash("GetNotebookRuntimeTemplate") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.GetNotebookRuntimeTemplateRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_runtime.NotebookRuntimeTemplate: + r"""Call the get notebook runtime + template method over HTTP. + + Args: + request (~.notebook_service.GetNotebookRuntimeTemplateRequest): + The request object. Request message for + [NotebookService.GetNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.GetNotebookRuntimeTemplate] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notebook_runtime.NotebookRuntimeTemplate: + A template that specifies runtime + configurations such as machine type, + runtime version, network configurations, + etc. Multiple runtimes can be created + from a runtime template. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}", + }, + ] + request, metadata = self._interceptor.pre_get_notebook_runtime_template( + request, metadata + ) + pb_request = notebook_service.GetNotebookRuntimeTemplateRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = notebook_runtime.NotebookRuntimeTemplate() + pb_resp = notebook_runtime.NotebookRuntimeTemplate.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_notebook_runtime_template(resp) + return resp + + class _ListNotebookRuntimes(NotebookServiceRestStub): + def __hash__(self): + return hash("ListNotebookRuntimes") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.ListNotebookRuntimesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_service.ListNotebookRuntimesResponse: + r"""Call the list notebook runtimes method over HTTP. + + Args: + request (~.notebook_service.ListNotebookRuntimesRequest): + The request object. Request message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimes]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notebook_service.ListNotebookRuntimesResponse: + Response message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimes]. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{parent=projects/*/locations/*}/notebookRuntimes", + }, + ] + request, metadata = self._interceptor.pre_list_notebook_runtimes( + request, metadata + ) + pb_request = notebook_service.ListNotebookRuntimesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = notebook_service.ListNotebookRuntimesResponse() + pb_resp = notebook_service.ListNotebookRuntimesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_notebook_runtimes(resp) + return resp + + class _ListNotebookRuntimeTemplates(NotebookServiceRestStub): + def __hash__(self): + return hash("ListNotebookRuntimeTemplates") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.ListNotebookRuntimeTemplatesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> notebook_service.ListNotebookRuntimeTemplatesResponse: + r"""Call the list notebook runtime + templates method over HTTP. + + Args: + request (~.notebook_service.ListNotebookRuntimeTemplatesRequest): + The request object. Request message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimeTemplates]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.notebook_service.ListNotebookRuntimeTemplatesResponse: + Response message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimeTemplates]. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{parent=projects/*/locations/*}/notebookRuntimeTemplates", + }, + ] + request, metadata = self._interceptor.pre_list_notebook_runtime_templates( + request, metadata + ) + pb_request = notebook_service.ListNotebookRuntimeTemplatesRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = notebook_service.ListNotebookRuntimeTemplatesResponse() + pb_resp = notebook_service.ListNotebookRuntimeTemplatesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_notebook_runtime_templates(resp) + return resp + + class _StartNotebookRuntime(NotebookServiceRestStub): + def __hash__(self): + return hash("StartNotebookRuntime") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.StartNotebookRuntimeRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the start notebook runtime method over HTTP. + + Args: + request (~.notebook_service.StartNotebookRuntimeRequest): + The request object. Request message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.StartNotebookRuntime]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}:start", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_start_notebook_runtime( + request, metadata + ) + pb_request = notebook_service.StartNotebookRuntimeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_start_notebook_runtime(resp) + return resp + + class _UpgradeNotebookRuntime(NotebookServiceRestStub): + def __hash__(self): + return hash("UpgradeNotebookRuntime") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: notebook_service.UpgradeNotebookRuntimeRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the upgrade notebook runtime method over HTTP. + + Args: + request (~.notebook_service.UpgradeNotebookRuntimeRequest): + The request object. Request message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.UpgradeNotebookRuntime]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}:upgrade", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_upgrade_notebook_runtime( + request, metadata + ) + pb_request = notebook_service.UpgradeNotebookRuntimeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_upgrade_notebook_runtime(resp) + return resp + + @property + def assign_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.AssignNotebookRuntimeRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._AssignNotebookRuntime(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.CreateNotebookRuntimeTemplateRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateNotebookRuntimeTemplate(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteNotebookRuntime(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.DeleteNotebookRuntimeTemplateRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteNotebookRuntimeTemplate(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeRequest], notebook_runtime.NotebookRuntime + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GetNotebookRuntime(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_notebook_runtime_template( + self, + ) -> Callable[ + [notebook_service.GetNotebookRuntimeTemplateRequest], + notebook_runtime.NotebookRuntimeTemplate, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetNotebookRuntimeTemplate(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_notebook_runtimes( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimesRequest], + notebook_service.ListNotebookRuntimesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListNotebookRuntimes(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_notebook_runtime_templates( + self, + ) -> Callable[ + [notebook_service.ListNotebookRuntimeTemplatesRequest], + notebook_service.ListNotebookRuntimeTemplatesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListNotebookRuntimeTemplates(self._session, self._host, self._interceptor) # type: ignore + + @property + def start_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.StartNotebookRuntimeRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._StartNotebookRuntime(self._session, self._host, self._interceptor) # type: ignore + + @property + def upgrade_notebook_runtime( + self, + ) -> Callable[ + [notebook_service.UpgradeNotebookRuntimeRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpgradeNotebookRuntime(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation(NotebookServiceRestStub): + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_location(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.Location() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_location(resp) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations(NotebookServiceRestStub): + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + + r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*}/locations", + }, + ] + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_locations(resp) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy(NotebookServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the get iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + 
"method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + ] + + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy(NotebookServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the set iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": 
"/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + ] + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions(NotebookServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + ] + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, 
**request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(NotebookServiceRestStub): + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:cancel", + }, 
+ { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + 
"uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation(NotebookServiceRestStub): + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + 
}, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(NotebookServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { 
+ "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(NotebookServiceRestStub): + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/exampleStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation(NotebookServiceRestStub): + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the wait operation method over HTTP. 
+ + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": 
"post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + 
headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_wait_operation(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("NotebookServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py index ca74776957..522ed8f534 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/async_client.py @@ -85,6 +85,12 @@ class PersistentResourceServiceAsyncClient: parse_network_path = staticmethod( PersistentResourceServiceClient.parse_network_path ) + notebook_runtime_template_path = staticmethod( + PersistentResourceServiceClient.notebook_runtime_template_path + ) + parse_notebook_runtime_template_path = staticmethod( + PersistentResourceServiceClient.parse_notebook_runtime_template_path + ) persistent_resource_path = staticmethod( PersistentResourceServiceClient.persistent_resource_path ) @@ -944,6 +950,130 @@ async def sample_update_persistent_resource(): # Done; return the response. 
return response + async def reboot_persistent_resource( + self, + request: Optional[ + Union[persistent_resource_service.RebootPersistentResourceRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Reboots a PersistentResource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_reboot_persistent_resource(): + # Create a client + client = aiplatform_v1beta1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.RebootPersistentResourceRequest( + name="name_value", + ) + + # Make the request + operation = client.reboot_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.RebootPersistentResourceRequest, dict]]): + The request object. Request message for + [PersistentResourceService.RebootPersistentResource][google.cloud.aiplatform.v1beta1.PersistentResourceService.RebootPersistentResource]. + name (:class:`str`): + Required. The name of the PersistentResource resource. 
+ Format: + ``projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.PersistentResource` Represents long-lasting resources that are dedicated to users to runs custom + workloads. A PersistentResource can have multiple + node pools and each node pool can have its own + machine spec. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = persistent_resource_service.RebootPersistentResourceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.reboot_persistent_resource, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + persistent_resource.PersistentResource, + metadata_type=persistent_resource_service.RebootPersistentResourceOperationMetadata, + ) + + # Done; return the response. + return response + async def list_operations( self, request: Optional[operations_pb2.ListOperationsRequest] = None, diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/client.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/client.py index f237876646..c32c05573a 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/client.py @@ -218,6 +218,28 @@ def parse_network_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def notebook_runtime_template_path( + project: str, + location: str, + notebook_runtime_template: str, + ) -> str: + """Returns a fully-qualified notebook_runtime_template string.""" + return "projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}".format( + project=project, + location=location, + notebook_runtime_template=notebook_runtime_template, + ) + + @staticmethod + def parse_notebook_runtime_template_path(path: str) -> Dict[str, str]: + """Parses a notebook_runtime_template path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/notebookRuntimeTemplates/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def persistent_resource_path( project: 
str, @@ -1389,6 +1411,136 @@ def sample_update_persistent_resource(): # Done; return the response. return response + def reboot_persistent_resource( + self, + request: Optional[ + Union[persistent_resource_service.RebootPersistentResourceRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Reboots a PersistentResource. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_reboot_persistent_resource(): + # Create a client + client = aiplatform_v1beta1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.RebootPersistentResourceRequest( + name="name_value", + ) + + # Make the request + operation = client.reboot_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.RebootPersistentResourceRequest, dict]): + The request object. Request message for + [PersistentResourceService.RebootPersistentResource][google.cloud.aiplatform.v1beta1.PersistentResourceService.RebootPersistentResource]. + name (str): + Required. The name of the PersistentResource resource. 
+ Format: + ``projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.PersistentResource` Represents long-lasting resources that are dedicated to users to runs custom + workloads. A PersistentResource can have multiple + node pools and each node pool can have its own + machine spec. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a persistent_resource_service.RebootPersistentResourceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance( + request, persistent_resource_service.RebootPersistentResourceRequest + ): + request = persistent_resource_service.RebootPersistentResourceRequest( + request + ) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[ + self._transport.reboot_persistent_resource + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + persistent_resource.PersistentResource, + metadata_type=persistent_resource_service.RebootPersistentResourceOperationMetadata, + ) + + # Done; return the response. + return response + def __enter__(self) -> "PersistentResourceServiceClient": return self diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/base.py index a60544824a..eb89f44908 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/base.py @@ -158,6 +158,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.reboot_persistent_resource: gapic_v1.method.wrap_method( + self.reboot_persistent_resource, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -225,6 +230,15 @@ def update_persistent_resource( ]: raise NotImplementedError() + @property + def reboot_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.RebootPersistentResourceRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def list_operations( self, diff --git 
a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc.py index f1914ca69a..9b6bdd72a0 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc.py @@ -396,6 +396,35 @@ def update_persistent_resource( ) return self._stubs["update_persistent_resource"] + @property + def reboot_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.RebootPersistentResourceRequest], + operations_pb2.Operation, + ]: + r"""Return a callable for the reboot persistent resource method over gRPC. + + Reboots a PersistentResource. + + Returns: + Callable[[~.RebootPersistentResourceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "reboot_persistent_resource" not in self._stubs: + self._stubs["reboot_persistent_resource"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PersistentResourceService/RebootPersistentResource", + request_serializer=persistent_resource_service.RebootPersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["reboot_persistent_resource"] + def close(self): self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc_asyncio.py index 23ce4cd4c3..4c0d8706b3 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/grpc_asyncio.py @@ -401,6 +401,35 @@ def update_persistent_resource( ) return self._stubs["update_persistent_resource"] + @property + def reboot_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.RebootPersistentResourceRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the reboot persistent resource method over gRPC. + + Reboots a PersistentResource. + + Returns: + Callable[[~.RebootPersistentResourceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "reboot_persistent_resource" not in self._stubs: + self._stubs["reboot_persistent_resource"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.PersistentResourceService/RebootPersistentResource", + request_serializer=persistent_resource_service.RebootPersistentResourceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["reboot_persistent_resource"] + def close(self): return self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest.py index 61f9968940..975a8db392 100644 --- a/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/persistent_resource_service/transports/rest.py @@ -107,6 +107,14 @@ def post_list_persistent_resources(self, response): logging.log(f"Received response: {response}") return response + def pre_reboot_persistent_resource(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_reboot_persistent_resource(self, response): + logging.log(f"Received response: {response}") + return response + def pre_update_persistent_resource(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -225,6 +233,32 @@ def post_list_persistent_resources( """ return response + def pre_reboot_persistent_resource( + self, + request: persistent_resource_service.RebootPersistentResourceRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + persistent_resource_service.RebootPersistentResourceRequest, + Sequence[Tuple[str, str]], + ]: + """Pre-rpc interceptor for reboot_persistent_resource + + Override in a subclass to manipulate the request or metadata + before they are sent to the PersistentResourceService server. 
+ """ + return request, metadata + + def post_reboot_persistent_resource( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for reboot_persistent_resource + + Override in a subclass to manipulate the response + after it is returned by the PersistentResourceService server but before + it is returned to user code. + """ + return response + def pre_update_persistent_resource( self, request: persistent_resource_service.UpdatePersistentResourceRequest, @@ -706,6 +740,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1036,6 +1074,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1414,6 +1456,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1800,6 +1846,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { 
"method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2044,10 +2094,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2178,6 +2224,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -2811,6 +2861,103 @@ def __call__( resp = self._interceptor.post_list_persistent_resources(resp) return resp + class _RebootPersistentResource(PersistentResourceServiceRestStub): + def __hash__(self): + return hash("RebootPersistentResource") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: persistent_resource_service.RebootPersistentResourceRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the reboot persistent + resource method over HTTP. + + Args: + request (~.persistent_resource_service.RebootPersistentResourceRequest): + The request object. Request message for + [PersistentResourceService.RebootPersistentResource][google.cloud.aiplatform.v1beta1.PersistentResourceService.RebootPersistentResource]. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}:reboot", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_reboot_persistent_resource( + request, metadata + ) + pb_request = persistent_resource_service.RebootPersistentResourceRequest.pb( + request + ) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_reboot_persistent_resource(resp) + return resp + class _UpdatePersistentResource(PersistentResourceServiceRestStub): def __hash__(self): return hash("UpdatePersistentResource") @@ -2954,6 +3101,17 @@ def list_persistent_resources( # In C++ this would require a dynamic_cast return self._ListPersistentResources(self._session, self._host, self._interceptor) # type: ignore + @property + def reboot_persistent_resource( + self, + ) -> Callable[ + [persistent_resource_service.RebootPersistentResourceRequest], + operations_pb2.Operation, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._RebootPersistentResource(self._session, self._host, self._interceptor) # type: ignore + @property def update_persistent_resource( self, @@ -3639,6 +3797,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4026,6 +4188,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4464,6 +4630,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4911,6 +5081,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5155,10 +5329,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -5350,6 +5520,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py index 725e9f0343..461505401e 100644 --- a/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/rest.py @@ -898,6 +898,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1228,6 +1232,10 @@ def operations_client(self) -> 
operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1606,6 +1614,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1992,6 +2004,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2236,10 +2252,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2370,6 +2382,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -4497,6 +4513,10 @@ def __call__( "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4884,6 +4904,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5322,6 +5346,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5769,6 +5797,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -6013,10 +6045,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -6208,6 +6236,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git 
a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py index 1ada5e6407..28011cc656 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/async_client.py @@ -77,6 +77,8 @@ class PredictionServiceAsyncClient: parse_endpoint_path = staticmethod(PredictionServiceClient.parse_endpoint_path) model_path = staticmethod(PredictionServiceClient.model_path) parse_model_path = staticmethod(PredictionServiceClient.parse_model_path) + rag_corpus_path = staticmethod(PredictionServiceClient.rag_corpus_path) + parse_rag_corpus_path = staticmethod(PredictionServiceClient.parse_rag_corpus_path) common_billing_account_path = staticmethod( PredictionServiceClient.common_billing_account_path ) @@ -1795,6 +1797,170 @@ async def sample_stream_generate_content(): # Done; return the response. return response + def chat_completions( + self, + request: Optional[ + Union[prediction_service.ChatCompletionsRequest, dict] + ] = None, + *, + endpoint: Optional[str] = None, + http_body: Optional[httpbody_pb2.HttpBody] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[httpbody_pb2.HttpBody]]: + r"""Exposes an OpenAI-compatible endpoint for chat + completions. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_chat_completions(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ChatCompletionsRequest( + endpoint="endpoint_value", + ) + + # Make the request + stream = await client.chat_completions(request=request) + + # Handle the response + async for response in stream: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.ChatCompletionsRequest, dict]]): + The request object. Request message for [PredictionService.ChatCompletions] + endpoint (:class:`str`): + Required. The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/openapi`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + http_body (:class:`google.api.httpbody_pb2.HttpBody`): + Optional. The prediction input. + Supports HTTP headers and arbitrary data + payload. + + This corresponds to the ``http_body`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.api.httpbody_pb2.HttpBody]: + Message that represents an arbitrary HTTP body. It should only be used for + payload formats that can't be represented as JSON, + such as raw binary or an HTML page. 
+ + This message can be used both in streaming and + non-streaming API methods in the request as well as + the response. + + It can be used as a top-level request field, which is + convenient if one wants to extract parameters from + either the URL or HTTP template into the request + fields and also want access to the raw HTTP body. + + Example: + + message GetResourceRequest { + // A unique request id. string request_id = 1; + + // The raw HTTP body is bound to this field. + google.api.HttpBody http_body = 2; + + } + + service ResourceService { + rpc GetResource(GetResourceRequest) + returns (google.api.HttpBody); + + rpc UpdateResource(google.api.HttpBody) + returns (google.protobuf.Empty); + + } + + Example with streaming methods: + + service CaldavService { + rpc GetCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + rpc UpdateCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + } + + Use of this type only changes how the request and + response bodies are handled, all other features will + continue to work unchanged. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([endpoint, http_body]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = prediction_service.ChatCompletionsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if http_body is not None: + request.http_body = http_body + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.chat_completions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def list_operations( self, request: Optional[operations_pb2.ListOperationsRequest] = None, diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py index 7e754d43c3..5dfa236d01 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/client.py @@ -236,6 +236,28 @@ def parse_model_path(path: str) -> Dict[str, str]: ) return m.groupdict() if m else {} + @staticmethod + def rag_corpus_path( + project: str, + location: str, + rag_corpus: str, + ) -> str: + """Returns a fully-qualified rag_corpus string.""" + return "projects/{project}/locations/{location}/ragCorpora/{rag_corpus}".format( + project=project, + location=location, + rag_corpus=rag_corpus, + ) + + @staticmethod + def parse_rag_corpus_path(path: str) -> Dict[str, str]: + """Parses a rag_corpus path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/ragCorpora/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def common_billing_account_path( billing_account: str, @@ -2214,6 +2236,170 @@ def sample_stream_generate_content(): # Done; return the response. 
return response + def chat_completions( + self, + request: Optional[ + Union[prediction_service.ChatCompletionsRequest, dict] + ] = None, + *, + endpoint: Optional[str] = None, + http_body: Optional[httpbody_pb2.HttpBody] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[httpbody_pb2.HttpBody]: + r"""Exposes an OpenAI-compatible endpoint for chat + completions. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_chat_completions(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ChatCompletionsRequest( + endpoint="endpoint_value", + ) + + # Make the request + stream = client.chat_completions(request=request) + + # Handle the response + for response in stream: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ChatCompletionsRequest, dict]): + The request object. Request message for [PredictionService.ChatCompletions] + endpoint (str): + Required. The name of the Endpoint requested to serve + the prediction. Format: + ``projects/{project}/locations/{location}/endpoints/openapi`` + + This corresponds to the ``endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + http_body (google.api.httpbody_pb2.HttpBody): + Optional. The prediction input. + Supports HTTP headers and arbitrary data + payload. 
+ + This corresponds to the ``http_body`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.api.httpbody_pb2.HttpBody]: + Message that represents an arbitrary HTTP body. It should only be used for + payload formats that can't be represented as JSON, + such as raw binary or an HTML page. + + This message can be used both in streaming and + non-streaming API methods in the request as well as + the response. + + It can be used as a top-level request field, which is + convenient if one wants to extract parameters from + either the URL or HTTP template into the request + fields and also want access to the raw HTTP body. + + Example: + + message GetResourceRequest { + // A unique request id. string request_id = 1; + + // The raw HTTP body is bound to this field. + google.api.HttpBody http_body = 2; + + } + + service ResourceService { + rpc GetResource(GetResourceRequest) + returns (google.api.HttpBody); + + rpc UpdateResource(google.api.HttpBody) + returns (google.protobuf.Empty); + + } + + Example with streaming methods: + + service CaldavService { + rpc GetCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + rpc UpdateCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + } + + Use of this type only changes how the request and + response bodies are handled, all other features will + continue to work unchanged. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([endpoint, http_body]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a prediction_service.ChatCompletionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, prediction_service.ChatCompletionsRequest): + request = prediction_service.ChatCompletionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if endpoint is not None: + request.endpoint = endpoint + if http_body is not None: + request.http_body = http_body + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.chat_completions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("endpoint", request.endpoint),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + def __enter__(self) -> "PredictionServiceClient": return self diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py index 2979e418c3..44fb32635a 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/base.py @@ -197,6 +197,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=None, client_info=client_info, ), + self.chat_completions: gapic_v1.method.wrap_method( + self.chat_completions, + default_timeout=None, + client_info=client_info, + ), } def close(self): @@ -361,6 +366,15 @@ def stream_generate_content( ]: raise NotImplementedError() + @property + def chat_completions( + self, + ) -> Callable[ + [prediction_service.ChatCompletionsRequest], + Union[httpbody_pb2.HttpBody, Awaitable[httpbody_pb2.HttpBody]], + ]: + raise NotImplementedError() + @property def list_operations( self, diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py index f141c5b243..e40bf8e2b7 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc.py @@ -635,6 +635,33 @@ def stream_generate_content( ) return self._stubs["stream_generate_content"] + @property + def chat_completions( + self, + ) -> Callable[[prediction_service.ChatCompletionsRequest], httpbody_pb2.HttpBody]: + r"""Return a callable for the chat completions method over gRPC. + + Exposes an OpenAI-compatible endpoint for chat + completions. + + Returns: + Callable[[~.ChatCompletionsRequest], + ~.HttpBody]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "chat_completions" not in self._stubs: + self._stubs["chat_completions"] = self.grpc_channel.unary_stream( + "/google.cloud.aiplatform.v1beta1.PredictionService/ChatCompletions", + request_serializer=prediction_service.ChatCompletionsRequest.serialize, + response_deserializer=httpbody_pb2.HttpBody.FromString, + ) + return self._stubs["chat_completions"] + def close(self): self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py index aa491fadc2..27e0595610 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -643,6 +643,35 @@ def stream_generate_content( ) return self._stubs["stream_generate_content"] + @property + def chat_completions( + self, + ) -> Callable[ + [prediction_service.ChatCompletionsRequest], Awaitable[httpbody_pb2.HttpBody] + ]: + r"""Return a callable for the chat completions method over gRPC. + + Exposes an OpenAI-compatible endpoint for chat + completions. + + Returns: + Callable[[~.ChatCompletionsRequest], + Awaitable[~.HttpBody]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "chat_completions" not in self._stubs: + self._stubs["chat_completions"] = self.grpc_channel.unary_stream( + "/google.cloud.aiplatform.v1beta1.PredictionService/ChatCompletions", + request_serializer=prediction_service.ChatCompletionsRequest.serialize, + response_deserializer=httpbody_pb2.HttpBody.FromString, + ) + return self._stubs["chat_completions"] + def close(self): return self.grpc_channel.close() diff --git a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py index 51b77fdf57..7f907099ba 100644 --- a/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/prediction_service/transports/rest.py @@ -74,6 +74,14 @@ class PredictionServiceRestInterceptor: .. code-block:: python class MyCustomPredictionServiceInterceptor(PredictionServiceRestInterceptor): + def pre_chat_completions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_chat_completions(self, response): + logging.log(f"Received response: {response}") + return response + def pre_count_tokens(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -152,6 +160,29 @@ def post_stream_generate_content(self, response): """ + def pre_chat_completions( + self, + request: prediction_service.ChatCompletionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[prediction_service.ChatCompletionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for chat_completions + + Override in a subclass to manipulate the request or metadata + before they are sent to the PredictionService server. 
+ """ + return request, metadata + + def post_chat_completions( + self, response: rest_streaming.ResponseIterator + ) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for chat_completions + + Override in a subclass to manipulate the response + after it is returned by the PredictionService server but before + it is returned to user code. + """ + return response + def pre_count_tokens( self, request: prediction_service.CountTokensRequest, @@ -685,6 +716,144 @@ def __init__( self._interceptor = interceptor or PredictionServiceRestInterceptor() self._prep_wrapped_messages(client_info) + class _ChatCompletions(PredictionServiceRestStub): + def __hash__(self): + return hash("ChatCompletions") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: prediction_service.ChatCompletionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> rest_streaming.ResponseIterator: + r"""Call the chat completions method over HTTP. + + Args: + request (~.prediction_service.ChatCompletionsRequest): + The request object. Request message for [PredictionService.ChatCompletions] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.httpbody_pb2.HttpBody: + Message that represents an arbitrary HTTP body. It + should only be used for payload formats that can't be + represented as JSON, such as raw binary or an HTML page. + + This message can be used both in streaming and + non-streaming API methods in the request as well as the + response. 
+ + It can be used as a top-level request field, which is + convenient if one wants to extract parameters from + either the URL or HTTP template into the request fields + and also want access to the raw HTTP body. + + Example: + + :: + + message GetResourceRequest { + // A unique request id. + string request_id = 1; + + // The raw HTTP body is bound to this field. + google.api.HttpBody http_body = 2; + + } + + service ResourceService { + rpc GetResource(GetResourceRequest) + returns (google.api.HttpBody); + rpc UpdateResource(google.api.HttpBody) + returns (google.protobuf.Empty); + + } + + Example with streaming methods: + + :: + + service CaldavService { + rpc GetCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + rpc UpdateCalendar(stream google.api.HttpBody) + returns (stream google.api.HttpBody); + + } + + Use of this type only changes how the request and + response bodies are handled, all other features will + continue to work unchanged. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}/chat/completions", + "body": "http_body", + }, + ] + request, metadata = self._interceptor.pre_chat_completions( + request, metadata + ) + pb_request = prediction_service.ChatCompletionsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + 
"{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator(response, httpbody_pb2.HttpBody) + resp = self._interceptor.post_chat_completions(resp) + return resp + class _CountTokens(PredictionServiceRestStub): def __hash__(self): return hash("CountTokens") @@ -1672,6 +1841,14 @@ def __call__( "Method StreamingRawPredict is not available over REST transport" ) + @property + def chat_completions( + self, + ) -> Callable[[prediction_service.ChatCompletionsRequest], httpbody_pb2.HttpBody]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ChatCompletions(self._session, self._host, self._interceptor) # type: ignore + @property def count_tokens( self, @@ -2483,6 +2660,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -2870,6 +3051,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3308,6 +3493,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + 
"uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -3755,6 +3944,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -3999,10 +4192,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -4194,6 +4383,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py index 8556e9474a..333c49849e 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_execution_service/transports/rest.py @@ -1229,6 +1229,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1616,6 +1620,10 @@ def 
__call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2054,6 +2062,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2501,6 +2513,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2745,10 +2761,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -2940,6 +2952,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest.py index 16563ad602..970810f0a0 100644 --- a/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest.py +++ 
b/google/cloud/aiplatform_v1beta1/services/reasoning_engine_service/transports/rest.py @@ -667,6 +667,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -997,6 +1001,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1375,6 +1383,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1761,6 +1773,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2005,10 +2021,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], 
"google.longrunning.Operations.WaitOperation": [ { @@ -2139,6 +2151,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3481,6 +3497,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -3868,6 +3888,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4306,6 +4330,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4753,6 +4781,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -4997,10 +5029,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": 
"/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -5192,6 +5220,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/rest.py index 41331434df..33ef3a10ca 100644 --- a/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/schedule_service/transports/rest.py @@ -724,6 +724,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1054,6 +1058,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1432,6 +1440,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", 
"uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1818,6 +1830,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2062,10 +2078,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2196,6 +2208,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3796,6 +3812,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4183,6 +4203,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4621,6 +4645,10 @@ def __call__( "method": "get", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5068,6 +5096,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5312,10 +5344,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -5507,6 +5535,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/rest.py index 531c5cd615..d681d09c98 100644 --- a/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/specialist_pool_service/transports/rest.py @@ -705,6 +705,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": 
"post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1035,6 +1039,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1413,6 +1421,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1799,6 +1811,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2043,10 +2059,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2177,6 +2189,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -3625,6 +3641,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -4012,6 +4032,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4450,6 +4474,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -4897,6 +4925,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -5141,10 +5173,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -5336,6 +5364,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + 
}, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py index 715cb21119..780a6087b8 100644 --- a/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/tensorboard_service/transports/rest.py @@ -1534,6 +1534,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1864,6 +1868,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2242,6 +2250,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2628,6 +2640,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", 
"uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2872,10 +2888,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -3006,6 +3018,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -7078,6 +7094,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -7465,6 +7485,10 @@ def __call__( "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -7903,6 +7927,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -8350,6 +8378,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -8594,10 +8626,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -8789,6 +8817,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/__init__.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/__init__.py new file mode 100644 index 0000000000..4c465b9c69 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import VertexRagDataServiceClient +from .async_client import VertexRagDataServiceAsyncClient + +__all__ = ( + "VertexRagDataServiceClient", + "VertexRagDataServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py new file mode 100644 index 0000000000..ce74571c15 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/async_client.py @@ -0,0 +1,2122 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import pagers +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import VertexRagDataServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import VertexRagDataServiceGrpcAsyncIOTransport +from .client import VertexRagDataServiceClient + + +class VertexRagDataServiceAsyncClient: + """A service for managing user 
data for RAG.""" + + _client: VertexRagDataServiceClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = VertexRagDataServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = VertexRagDataServiceClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = VertexRagDataServiceClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = VertexRagDataServiceClient._DEFAULT_UNIVERSE + + rag_corpus_path = staticmethod(VertexRagDataServiceClient.rag_corpus_path) + parse_rag_corpus_path = staticmethod( + VertexRagDataServiceClient.parse_rag_corpus_path + ) + rag_file_path = staticmethod(VertexRagDataServiceClient.rag_file_path) + parse_rag_file_path = staticmethod(VertexRagDataServiceClient.parse_rag_file_path) + common_billing_account_path = staticmethod( + VertexRagDataServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + VertexRagDataServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(VertexRagDataServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + VertexRagDataServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + VertexRagDataServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + VertexRagDataServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(VertexRagDataServiceClient.common_project_path) + parse_common_project_path = staticmethod( + VertexRagDataServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(VertexRagDataServiceClient.common_location_path) + parse_common_location_path = staticmethod( + VertexRagDataServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. 
+ + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VertexRagDataServiceAsyncClient: The constructed client. + """ + return VertexRagDataServiceClient.from_service_account_info.__func__(VertexRagDataServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VertexRagDataServiceAsyncClient: The constructed client. + """ + return VertexRagDataServiceClient.from_service_account_file.__func__(VertexRagDataServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. 
+ (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return VertexRagDataServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> VertexRagDataServiceTransport: + """Returns the transport used by the client instance. + + Returns: + VertexRagDataServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. 
+ """ + return self._client._universe_domain + + get_transport_class = functools.partial( + type(VertexRagDataServiceClient).get_transport_class, + type(VertexRagDataServiceClient), + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, VertexRagDataServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the vertex rag data service async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.VertexRagDataServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. 
If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = VertexRagDataServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_rag_corpus( + self, + request: Optional[ + Union[vertex_rag_data_service.CreateRagCorpusRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + rag_corpus: Optional[vertex_rag_data.RagCorpus] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a RagCorpus. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_create_rag_corpus(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + rag_corpus = aiplatform_v1beta1.RagCorpus() + rag_corpus.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateRagCorpusRequest( + parent="parent_value", + rag_corpus=rag_corpus, + ) + + # Make the request + operation = client.create_rag_corpus(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.CreateRagCorpusRequest, dict]]): + The request object. Request message for + [VertexRagDataService.CreateRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.CreateRagCorpus]. + parent (:class:`str`): + Required. The resource name of the Location to create + the RagCorpus in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + rag_corpus (:class:`google.cloud.aiplatform_v1beta1.types.RagCorpus`): + Required. The RagCorpus to create. + This corresponds to the ``rag_corpus`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
+ + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.RagCorpus` A RagCorpus is a RagFile container and a project can have multiple + RagCorpora. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, rag_corpus]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = vertex_rag_data_service.CreateRagCorpusRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if rag_corpus is not None: + request.rag_corpus = rag_corpus + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_rag_corpus, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + vertex_rag_data.RagCorpus, + metadata_type=vertex_rag_data_service.CreateRagCorpusOperationMetadata, + ) + + # Done; return the response. 
+ return response + + async def get_rag_corpus( + self, + request: Optional[ + Union[vertex_rag_data_service.GetRagCorpusRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_data.RagCorpus: + r"""Gets a RagCorpus. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_get_rag_corpus(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetRagCorpusRequest( + name="name_value", + ) + + # Make the request + response = await client.get_rag_corpus(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.GetRagCorpusRequest, dict]]): + The request object. Request message for + [VertexRagDataService.GetRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.GetRagCorpus] + name (:class:`str`): + Required. The name of the RagCorpus resource. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.RagCorpus: + A RagCorpus is a RagFile container + and a project can have multiple + RagCorpora. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = vertex_rag_data_service.GetRagCorpusRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_rag_corpus, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_rag_corpora( + self, + request: Optional[ + Union[vertex_rag_data_service.ListRagCorporaRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListRagCorporaAsyncPager: + r"""Lists RagCorpora in a Location. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_list_rag_corpora(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListRagCorporaRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_rag_corpora(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.ListRagCorporaRequest, dict]]): + The request object. Request message for + [VertexRagDataService.ListRagCorpora][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagCorpora]. + parent (:class:`str`): + Required. The resource name of the Location from which + to list the RagCorpora. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.pagers.ListRagCorporaAsyncPager: + Response message for + [VertexRagDataService.ListRagCorpora][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagCorpora]. 
+ + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = vertex_rag_data_service.ListRagCorporaRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_rag_corpora, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListRagCorporaAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def delete_rag_corpus( + self, + request: Optional[ + Union[vertex_rag_data_service.DeleteRagCorpusRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a RagCorpus. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_rag_corpus(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteRagCorpusRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_rag_corpus(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.DeleteRagCorpusRequest, dict]]): + The request object. Request message for + [VertexRagDataService.DeleteRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.DeleteRagCorpus]. + name (:class:`str`): + Required. The name of the RagCorpus resource to be + deleted. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = vertex_rag_data_service.DeleteRagCorpusRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_rag_corpus, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def upload_rag_file( + self, + request: Optional[ + Union[vertex_rag_data_service.UploadRagFileRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + rag_file: Optional[vertex_rag_data.RagFile] = None, + upload_rag_file_config: Optional[vertex_rag_data.UploadRagFileConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_data_service.UploadRagFileResponse: + r"""Upload a file into a RagCorpus. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_upload_rag_file(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + rag_file = aiplatform_v1beta1.RagFile() + rag_file.gcs_source.uris = ['uris_value1', 'uris_value2'] + rag_file.display_name = "display_name_value" + + request = aiplatform_v1beta1.UploadRagFileRequest( + parent="parent_value", + rag_file=rag_file, + ) + + # Make the request + response = await client.upload_rag_file(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.UploadRagFileRequest, dict]]): + The request object. 
Request message for + [VertexRagDataService.UploadRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile]. + parent (:class:`str`): + Required. The name of the RagCorpus resource into which + to upload the file. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + rag_file (:class:`google.cloud.aiplatform_v1beta1.types.RagFile`): + Required. The RagFile to upload. + This corresponds to the ``rag_file`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + upload_rag_file_config (:class:`google.cloud.aiplatform_v1beta1.types.UploadRagFileConfig`): + Required. The config for the RagFiles to be uploaded + into the RagCorpus. + [VertexRagDataService.UploadRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile]. + + This corresponds to the ``upload_rag_file_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.UploadRagFileResponse: + Response message for + [VertexRagDataService.UploadRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, rag_file, upload_rag_file_config]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + request = vertex_rag_data_service.UploadRagFileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if rag_file is not None: + request.rag_file = rag_file + if upload_rag_file_config is not None: + request.upload_rag_file_config = upload_rag_file_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.upload_rag_file, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def import_rag_files( + self, + request: Optional[ + Union[vertex_rag_data_service.ImportRagFilesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + import_rag_files_config: Optional[vertex_rag_data.ImportRagFilesConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Import files from Google Cloud Storage or Google + Drive into a RagCorpus. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_import_rag_files(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + import_rag_files_config = aiplatform_v1beta1.ImportRagFilesConfig() + import_rag_files_config.gcs_source.uris = ['uris_value1', 'uris_value2'] + + request = aiplatform_v1beta1.ImportRagFilesRequest( + parent="parent_value", + import_rag_files_config=import_rag_files_config, + ) + + # Make the request + operation = client.import_rag_files(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.ImportRagFilesRequest, dict]]): + The request object. Request message for + [VertexRagDataService.ImportRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles]. + parent (:class:`str`): + Required. The name of the RagCorpus resource into which + to import files. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + import_rag_files_config (:class:`google.cloud.aiplatform_v1beta1.types.ImportRagFilesConfig`): + Required. The config for the RagFiles to be synced and + imported into the RagCorpus. + [VertexRagDataService.ImportRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles]. + + This corresponds to the ``import_rag_files_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ImportRagFilesResponse` Response message for + [VertexRagDataService.ImportRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, import_rag_files_config]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = vertex_rag_data_service.ImportRagFilesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if import_rag_files_config is not None: + request.import_rag_files_config = import_rag_files_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.import_rag_files, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + vertex_rag_data_service.ImportRagFilesResponse, + metadata_type=vertex_rag_data_service.ImportRagFilesOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_rag_file( + self, + request: Optional[ + Union[vertex_rag_data_service.GetRagFileRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_data.RagFile: + r"""Gets a RagFile. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_get_rag_file(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetRagFileRequest( + name="name_value", + ) + + # Make the request + response = await client.get_rag_file(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.GetRagFileRequest, dict]]): + The request object. Request message for + [VertexRagDataService.GetRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.GetRagFile] + name (:class:`str`): + Required. The name of the RagFile resource. 
Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.RagFile: + A RagFile contains user data for + chunking, embedding and indexing. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = vertex_rag_data_service.GetRagFileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_rag_file, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_rag_files( + self, + request: Optional[ + Union[vertex_rag_data_service.ListRagFilesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListRagFilesAsyncPager: + r"""Lists RagFiles in a RagCorpus. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_list_rag_files(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListRagFilesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_rag_files(request=request) + + # Handle the response + async for response in page_result: + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.ListRagFilesRequest, dict]]): + The request object. Request message for + [VertexRagDataService.ListRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagFiles]. + parent (:class:`str`): + Required. The resource name of the RagCorpus from which + to list the RagFiles. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.pagers.ListRagFilesAsyncPager: + Response message for + [VertexRagDataService.ListRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagFiles]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = vertex_rag_data_service.ListRagFilesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_rag_files, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. 
+ response = pagers.ListRagFilesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_rag_file( + self, + request: Optional[ + Union[vertex_rag_data_service.DeleteRagFileRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Deletes a RagFile. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_delete_rag_file(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteRagFileRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_rag_file(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.DeleteRagFileRequest, dict]]): + The request object. Request message for + [VertexRagDataService.DeleteRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.DeleteRagFile]. + name (:class:`str`): + Required. The name of the RagFile resource to be + deleted. 
Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = vertex_rag_data_service.DeleteRagFileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_rag_file, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def __aenter__(self) -> "VertexRagDataServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("VertexRagDataServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py new file mode 100644 index 0000000000..6f13baed26 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/client.py @@ -0,0 +1,2561 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +from google.api_core import operation as gac_operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import pagers +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +from .transports.base import VertexRagDataServiceTransport, 
DEFAULT_CLIENT_INFO +from .transports.grpc import VertexRagDataServiceGrpcTransport +from .transports.grpc_asyncio import VertexRagDataServiceGrpcAsyncIOTransport +from .transports.rest import VertexRagDataServiceRestTransport + + +class VertexRagDataServiceClientMeta(type): + """Metaclass for the VertexRagDataService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + + _transport_registry = ( + OrderedDict() + ) # type: Dict[str, Type[VertexRagDataServiceTransport]] + _transport_registry["grpc"] = VertexRagDataServiceGrpcTransport + _transport_registry["grpc_asyncio"] = VertexRagDataServiceGrpcAsyncIOTransport + _transport_registry["rest"] = VertexRagDataServiceRestTransport + + def get_transport_class( + cls, + label: Optional[str] = None, + ) -> Type[VertexRagDataServiceTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class VertexRagDataServiceClient(metaclass=VertexRagDataServiceClientMeta): + """A service for managing user data for RAG.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. 
+ """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. + DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VertexRagDataServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VertexRagDataServiceClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> VertexRagDataServiceTransport: + """Returns the transport used by the client instance. + + Returns: + VertexRagDataServiceTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def rag_corpus_path( + project: str, + location: str, + rag_corpus: str, + ) -> str: + """Returns a fully-qualified rag_corpus string.""" + return "projects/{project}/locations/{location}/ragCorpora/{rag_corpus}".format( + project=project, + location=location, + rag_corpus=rag_corpus, + ) + + @staticmethod + def parse_rag_corpus_path(path: str) -> Dict[str, str]: + """Parses a rag_corpus path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/ragCorpora/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def rag_file_path( + project: str, + location: str, + rag_corpus: str, + rag_file: str, + ) -> str: + """Returns a fully-qualified rag_file string.""" + return "projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}".format( + project=project, + location=location, + rag_corpus=rag_corpus, + rag_file=rag_file, + ) + + @staticmethod + def parse_rag_file_path(path: str) -> Dict[str, str]: + """Parses a rag_file path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/locations/(?P.+?)/ragCorpora/(?P.+?)/ragFiles/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path( + billing_account: str, + ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str, 
str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path( + folder: str, + ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format( + folder=folder, + ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str, str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path( + organization: str, + ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format( + organization=organization, + ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str, str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path( + project: str, + ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format( + project=project, + ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str, str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path( + project: str, + location: str, + ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str, str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: 
Optional[client_options_lib.ClientOptions] = None + ): + """Deprecated. Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. 
Use the api_endpoint property instead.", + DeprecationWarning, + ) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. 
+ """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". 
+ + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = VertexRagDataServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = VertexRagDataServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = VertexRagDataServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = VertexRagDataServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. 
+ + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = VertexRagDataServiceClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or VertexRagDataServiceClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, VertexRagDataServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the vertex rag data service client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, VertexRagDataServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. 
+ + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = VertexRagDataServiceClient._read_environment_variables() + self._client_cert_source = VertexRagDataServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = VertexRagDataServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, VertexRagDataServiceTransport) + if transport_provided: + # transport is a VertexRagDataServiceTransport instance. 
+ if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = cast(VertexRagDataServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or VertexRagDataServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(cast(str, transport)) + self._transport = Transport( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + def create_rag_corpus( + self, + request: Optional[ + Union[vertex_rag_data_service.CreateRagCorpusRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + rag_corpus: Optional[vertex_rag_data.RagCorpus] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Creates a RagCorpus. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_create_rag_corpus(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + rag_corpus = aiplatform_v1beta1.RagCorpus() + rag_corpus.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateRagCorpusRequest( + parent="parent_value", + rag_corpus=rag_corpus, + ) + + # Make the request + operation = client.create_rag_corpus(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.CreateRagCorpusRequest, dict]): + The request object. Request message for + [VertexRagDataService.CreateRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.CreateRagCorpus]. + parent (str): + Required. The resource name of the Location to create + the RagCorpus in. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + rag_corpus (google.cloud.aiplatform_v1beta1.types.RagCorpus): + Required. The RagCorpus to create. + This corresponds to the ``rag_corpus`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.RagCorpus` A RagCorpus is a RagFile container and a project can have multiple + RagCorpora. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, rag_corpus]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a vertex_rag_data_service.CreateRagCorpusRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vertex_rag_data_service.CreateRagCorpusRequest): + request = vertex_rag_data_service.CreateRagCorpusRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if rag_corpus is not None: + request.rag_corpus = rag_corpus + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_rag_corpus] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
+ response = gac_operation.from_gapic( + response, + self._transport.operations_client, + vertex_rag_data.RagCorpus, + metadata_type=vertex_rag_data_service.CreateRagCorpusOperationMetadata, + ) + + # Done; return the response. + return response + + def get_rag_corpus( + self, + request: Optional[ + Union[vertex_rag_data_service.GetRagCorpusRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_data.RagCorpus: + r"""Gets a RagCorpus. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_get_rag_corpus(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetRagCorpusRequest( + name="name_value", + ) + + # Make the request + response = client.get_rag_corpus(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetRagCorpusRequest, dict]): + The request object. Request message for + [VertexRagDataService.GetRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.GetRagCorpus] + name (str): + Required. The name of the RagCorpus resource. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.RagCorpus: + A RagCorpus is a RagFile container + and a project can have multiple + RagCorpora. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a vertex_rag_data_service.GetRagCorpusRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vertex_rag_data_service.GetRagCorpusRequest): + request = vertex_rag_data_service.GetRagCorpusRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_rag_corpus] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_rag_corpora( + self, + request: Optional[ + Union[vertex_rag_data_service.ListRagCorporaRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListRagCorporaPager: + r"""Lists RagCorpora in a Location. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_list_rag_corpora(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListRagCorporaRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_rag_corpora(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListRagCorporaRequest, dict]): + The request object. Request message for + [VertexRagDataService.ListRagCorpora][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagCorpora]. + parent (str): + Required. The resource name of the Location from which + to list the RagCorpora. Format: + ``projects/{project}/locations/{location}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.pagers.ListRagCorporaPager: + Response message for + [VertexRagDataService.ListRagCorpora][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagCorpora]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a vertex_rag_data_service.ListRagCorporaRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vertex_rag_data_service.ListRagCorporaRequest): + request = vertex_rag_data_service.ListRagCorporaRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_rag_corpora] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListRagCorporaPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_rag_corpus( + self, + request: Optional[ + Union[vertex_rag_data_service.DeleteRagCorpusRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a RagCorpus. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_delete_rag_corpus(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteRagCorpusRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_rag_corpus(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteRagCorpusRequest, dict]): + The request object. Request message for + [VertexRagDataService.DeleteRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.DeleteRagCorpus]. + name (str): + Required. The name of the RagCorpus resource to be + deleted. 
Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a vertex_rag_data_service.DeleteRagCorpusRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vertex_rag_data_service.DeleteRagCorpusRequest): + request = vertex_rag_data_service.DeleteRagCorpusRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.delete_rag_corpus] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. + return response + + def upload_rag_file( + self, + request: Optional[ + Union[vertex_rag_data_service.UploadRagFileRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + rag_file: Optional[vertex_rag_data.RagFile] = None, + upload_rag_file_config: Optional[vertex_rag_data.UploadRagFileConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_data_service.UploadRagFileResponse: + r"""Upload a file into a RagCorpus. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_upload_rag_file(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + rag_file = aiplatform_v1beta1.RagFile() + rag_file.gcs_source.uris = ['uris_value1', 'uris_value2'] + rag_file.display_name = "display_name_value" + + request = aiplatform_v1beta1.UploadRagFileRequest( + parent="parent_value", + rag_file=rag_file, + ) + + # Make the request + response = client.upload_rag_file(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.UploadRagFileRequest, dict]): + The request object. Request message for + [VertexRagDataService.UploadRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile]. + parent (str): + Required. The name of the RagCorpus resource into which + to upload the file. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + rag_file (google.cloud.aiplatform_v1beta1.types.RagFile): + Required. The RagFile to upload. + This corresponds to the ``rag_file`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + upload_rag_file_config (google.cloud.aiplatform_v1beta1.types.UploadRagFileConfig): + Required. The config for the RagFiles to be uploaded + into the RagCorpus. + [VertexRagDataService.UploadRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile]. + + This corresponds to the ``upload_rag_file_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.UploadRagFileResponse: + Response message for + [VertexRagDataService.UploadRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, rag_file, upload_rag_file_config]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a vertex_rag_data_service.UploadRagFileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vertex_rag_data_service.UploadRagFileRequest): + request = vertex_rag_data_service.UploadRagFileRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if rag_file is not None: + request.rag_file = rag_file + if upload_rag_file_config is not None: + request.upload_rag_file_config = upload_rag_file_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.upload_rag_file] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. 
+ self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def import_rag_files( + self, + request: Optional[ + Union[vertex_rag_data_service.ImportRagFilesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + import_rag_files_config: Optional[vertex_rag_data.ImportRagFilesConfig] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Import files from Google Cloud Storage or Google + Drive into a RagCorpus. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_import_rag_files(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + import_rag_files_config = aiplatform_v1beta1.ImportRagFilesConfig() + import_rag_files_config.gcs_source.uris = ['uris_value1', 'uris_value2'] + + request = aiplatform_v1beta1.ImportRagFilesRequest( + parent="parent_value", + import_rag_files_config=import_rag_files_config, + ) + + # Make the request + operation = client.import_rag_files(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ImportRagFilesRequest, dict]): + The request object. 
Request message for + [VertexRagDataService.ImportRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles]. + parent (str): + Required. The name of the RagCorpus resource into which + to import files. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + import_rag_files_config (google.cloud.aiplatform_v1beta1.types.ImportRagFilesConfig): + Required. The config for the RagFiles to be synced and + imported into the RagCorpus. + [VertexRagDataService.ImportRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles]. + + This corresponds to the ``import_rag_files_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.ImportRagFilesResponse` Response message for + [VertexRagDataService.ImportRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, import_rag_files_config]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a vertex_rag_data_service.ImportRagFilesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vertex_rag_data_service.ImportRagFilesRequest): + request = vertex_rag_data_service.ImportRagFilesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if import_rag_files_config is not None: + request.import_rag_files_config = import_rag_files_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.import_rag_files] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + vertex_rag_data_service.ImportRagFilesResponse, + metadata_type=vertex_rag_data_service.ImportRagFilesOperationMetadata, + ) + + # Done; return the response. + return response + + def get_rag_file( + self, + request: Optional[ + Union[vertex_rag_data_service.GetRagFileRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_data.RagFile: + r"""Gets a RagFile. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_get_rag_file(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetRagFileRequest( + name="name_value", + ) + + # Make the request + response = client.get_rag_file(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.GetRagFileRequest, dict]): + The request object. Request message for + [VertexRagDataService.GetRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.GetRagFile] + name (str): + Required. The name of the RagFile resource. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.RagFile: + A RagFile contains user data for + chunking, embedding and indexing. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." 
+ ) + + # Minor optimization to avoid making a copy if the user passes + # in a vertex_rag_data_service.GetRagFileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vertex_rag_data_service.GetRagFileRequest): + request = vertex_rag_data_service.GetRagFileRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_rag_file] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_rag_files( + self, + request: Optional[ + Union[vertex_rag_data_service.ListRagFilesRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListRagFilesPager: + r"""Lists RagFiles in a RagCorpus. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_list_rag_files(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListRagFilesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_rag_files(request=request) + + # Handle the response + for response in page_result: + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.ListRagFilesRequest, dict]): + The request object. Request message for + [VertexRagDataService.ListRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagFiles]. + parent (str): + Required. The resource name of the RagCorpus from which + to list the RagFiles. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.pagers.ListRagFilesPager: + Response message for + [VertexRagDataService.ListRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagFiles]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a vertex_rag_data_service.ListRagFilesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vertex_rag_data_service.ListRagFilesRequest): + request = vertex_rag_data_service.ListRagFilesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_rag_files] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListRagFilesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_rag_file( + self, + request: Optional[ + Union[vertex_rag_data_service.DeleteRagFileRequest, dict] + ] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gac_operation.Operation: + r"""Deletes a RagFile. + + .. 
code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_delete_rag_file(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteRagFileRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_rag_file(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.DeleteRagFileRequest, dict]): + The request object. Request message for + [VertexRagDataService.DeleteRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.DeleteRagFile]. + name (str): + Required. The name of the RagFile resource to be + deleted. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: + + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); + + } + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a vertex_rag_data_service.DeleteRagFileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vertex_rag_data_service.DeleteRagFileRequest): + request = vertex_rag_data_service.DeleteRagFileRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_rag_file] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = gac_operation.from_gapic( + response, + self._transport.operations_client, + empty_pb2.Empty, + metadata_type=gca_operation.DeleteOperationMetadata, + ) + + # Done; return the response. 
+ return response + + def __enter__(self) -> "VertexRagDataServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("VertexRagDataServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/pagers.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/pagers.py new file mode 100644 index 0000000000..ff69b42cc4 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/pagers.py @@ -0,0 +1,290 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import ( + Any, + AsyncIterator, + Awaitable, + Callable, + Sequence, + Tuple, + Optional, + Iterator, +) + +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data_service + + +class ListRagCorporaPager: + """A pager for iterating through ``list_rag_corpora`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListRagCorporaResponse` object, and + provides an ``__iter__`` method to iterate through its + ``rag_corpora`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListRagCorpora`` requests and continue to iterate + through the ``rag_corpora`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListRagCorporaResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., vertex_rag_data_service.ListRagCorporaResponse], + request: vertex_rag_data_service.ListRagCorporaRequest, + response: vertex_rag_data_service.ListRagCorporaResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListRagCorporaRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListRagCorporaResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = vertex_rag_data_service.ListRagCorporaRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[vertex_rag_data_service.ListRagCorporaResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[vertex_rag_data.RagCorpus]: + for page in self.pages: + yield from page.rag_corpora + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListRagCorporaAsyncPager: + """A pager for iterating through ``list_rag_corpora`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListRagCorporaResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``rag_corpora`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListRagCorpora`` requests and continue to iterate + through the ``rag_corpora`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListRagCorporaResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[ + ..., Awaitable[vertex_rag_data_service.ListRagCorporaResponse] + ], + request: vertex_rag_data_service.ListRagCorporaRequest, + response: vertex_rag_data_service.ListRagCorporaResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListRagCorporaRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListRagCorporaResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = vertex_rag_data_service.ListRagCorporaRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[vertex_rag_data_service.ListRagCorporaResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[vertex_rag_data.RagCorpus]: + async def async_generator(): + async for page in self.pages: + for response in page.rag_corpora: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListRagFilesPager: + """A pager for iterating through ``list_rag_files`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListRagFilesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``rag_files`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListRagFiles`` requests and continue to iterate + through the ``rag_files`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListRagFilesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + + def __init__( + self, + method: Callable[..., vertex_rag_data_service.ListRagFilesResponse], + request: vertex_rag_data_service.ListRagFilesRequest, + response: vertex_rag_data_service.ListRagFilesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListRagFilesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListRagFilesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = vertex_rag_data_service.ListRagFilesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[vertex_rag_data_service.ListRagFilesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[vertex_rag_data.RagFile]: + for page in self.pages: + yield from page.rag_files + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) + + +class ListRagFilesAsyncPager: + """A pager for iterating through ``list_rag_files`` requests. + + This class thinly wraps an initial + :class:`google.cloud.aiplatform_v1beta1.types.ListRagFilesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``rag_files`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListRagFiles`` requests and continue to iterate + through the ``rag_files`` field on the + corresponding responses. + + All the usual :class:`google.cloud.aiplatform_v1beta1.types.ListRagFilesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + + def __init__( + self, + method: Callable[..., Awaitable[vertex_rag_data_service.ListRagFilesResponse]], + request: vertex_rag_data_service.ListRagFilesRequest, + response: vertex_rag_data_service.ListRagFilesResponse, + *, + metadata: Sequence[Tuple[str, str]] = () + ): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.aiplatform_v1beta1.types.ListRagFilesRequest): + The initial request object. + response (google.cloud.aiplatform_v1beta1.types.ListRagFilesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = vertex_rag_data_service.ListRagFilesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages( + self, + ) -> AsyncIterator[vertex_rag_data_service.ListRagFilesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterator[vertex_rag_data.RagFile]: + async def async_generator(): + async for page in self.pages: + for response in page.rag_files: + yield response + + return async_generator() + + def __repr__(self) -> str: + return "{0}<{1!r}>".format(self.__class__.__name__, self._response) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/__init__.py new file mode 100644 index 0000000000..6de2e6fca2 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/__init__.py @@ -0,0 +1,40 @@ +# -*- coding: 
utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import VertexRagDataServiceTransport +from .grpc import VertexRagDataServiceGrpcTransport +from .grpc_asyncio import VertexRagDataServiceGrpcAsyncIOTransport +from .rest import VertexRagDataServiceRestTransport +from .rest import VertexRagDataServiceRestInterceptor + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[VertexRagDataServiceTransport]] +_transport_registry["grpc"] = VertexRagDataServiceGrpcTransport +_transport_registry["grpc_asyncio"] = VertexRagDataServiceGrpcAsyncIOTransport +_transport_registry["rest"] = VertexRagDataServiceRestTransport + +__all__ = ( + "VertexRagDataServiceTransport", + "VertexRagDataServiceGrpcTransport", + "VertexRagDataServiceGrpcAsyncIOTransport", + "VertexRagDataServiceRestTransport", + "VertexRagDataServiceRestInterceptor", +) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/base.py new file mode 100644 index 0000000000..796c4fa57f --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/base.py @@ -0,0 +1,385 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class VertexRagDataServiceTransport(abc.ABC): + """Abstract transport class for VertexRagDataService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: 
gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.create_rag_corpus: gapic_v1.method.wrap_method( + self.create_rag_corpus, + default_timeout=None, + client_info=client_info, + ), + self.get_rag_corpus: gapic_v1.method.wrap_method( + self.get_rag_corpus, + default_timeout=None, + client_info=client_info, + ), + self.list_rag_corpora: gapic_v1.method.wrap_method( + self.list_rag_corpora, + default_timeout=None, + client_info=client_info, + ), + self.delete_rag_corpus: gapic_v1.method.wrap_method( + self.delete_rag_corpus, + default_timeout=None, + client_info=client_info, + ), + self.upload_rag_file: gapic_v1.method.wrap_method( + self.upload_rag_file, + default_timeout=None, + client_info=client_info, + ), + self.import_rag_files: gapic_v1.method.wrap_method( + self.import_rag_files, + default_timeout=None, + client_info=client_info, + ), + self.get_rag_file: gapic_v1.method.wrap_method( + self.get_rag_file, + default_timeout=None, + client_info=client_info, + ), + self.list_rag_files: gapic_v1.method.wrap_method( + self.list_rag_files, + default_timeout=None, + client_info=client_info, + ), + self.delete_rag_file: gapic_v1.method.wrap_method( + self.delete_rag_file, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_rag_corpus( + self, + ) -> Callable[ + [vertex_rag_data_service.CreateRagCorpusRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_rag_corpus( + self, + ) -> Callable[ + [vertex_rag_data_service.GetRagCorpusRequest], + Union[vertex_rag_data.RagCorpus, Awaitable[vertex_rag_data.RagCorpus]], + ]: + raise NotImplementedError() + + @property + def list_rag_corpora( + self, + ) -> Callable[ + [vertex_rag_data_service.ListRagCorporaRequest], + Union[ + vertex_rag_data_service.ListRagCorporaResponse, + Awaitable[vertex_rag_data_service.ListRagCorporaResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_rag_corpus( + self, + ) -> Callable[ + [vertex_rag_data_service.DeleteRagCorpusRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def upload_rag_file( + self, + ) -> Callable[ + [vertex_rag_data_service.UploadRagFileRequest], + Union[ + vertex_rag_data_service.UploadRagFileResponse, + Awaitable[vertex_rag_data_service.UploadRagFileResponse], + ], + ]: + raise NotImplementedError() + + @property + def import_rag_files( + self, + ) -> Callable[ + [vertex_rag_data_service.ImportRagFilesRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_rag_file( + self, + ) -> Callable[ + [vertex_rag_data_service.GetRagFileRequest], + Union[vertex_rag_data.RagFile, Awaitable[vertex_rag_data.RagFile]], + ]: + raise NotImplementedError() + + @property + def list_rag_files( + self, + ) -> Callable[ + [vertex_rag_data_service.ListRagFilesRequest], + Union[ + vertex_rag_data_service.ListRagFilesResponse, + 
Awaitable[vertex_rag_data_service.ListRagFilesResponse], + ], + ]: + raise NotImplementedError() + + @property + def delete_rag_file( + self, + ) -> Callable[ + [vertex_rag_data_service.DeleteRagFileRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + 
[locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("VertexRagDataServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc.py new file mode 100644 index 0000000000..a8af2ddbc6 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc.py @@ -0,0 +1,718 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import VertexRagDataServiceTransport, DEFAULT_CLIENT_INFO + + +class VertexRagDataServiceGrpcTransport(VertexRagDataServiceTransport): + """gRPC backend transport for VertexRagDataService. + + A service for managing user data for RAG. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. 
+ self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_rag_corpus( + self, + ) -> Callable[ + [vertex_rag_data_service.CreateRagCorpusRequest], operations_pb2.Operation + ]: + r"""Return a callable for the create rag corpus method over gRPC. + + Creates a RagCorpus. + + Returns: + Callable[[~.CreateRagCorpusRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "create_rag_corpus" not in self._stubs: + self._stubs["create_rag_corpus"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/CreateRagCorpus", + request_serializer=vertex_rag_data_service.CreateRagCorpusRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_rag_corpus"] + + @property + def get_rag_corpus( + self, + ) -> Callable[ + [vertex_rag_data_service.GetRagCorpusRequest], vertex_rag_data.RagCorpus + ]: + r"""Return a callable for the get rag corpus method over gRPC. + + Gets a RagCorpus. + + Returns: + Callable[[~.GetRagCorpusRequest], + ~.RagCorpus]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_rag_corpus" not in self._stubs: + self._stubs["get_rag_corpus"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/GetRagCorpus", + request_serializer=vertex_rag_data_service.GetRagCorpusRequest.serialize, + response_deserializer=vertex_rag_data.RagCorpus.deserialize, + ) + return self._stubs["get_rag_corpus"] + + @property + def list_rag_corpora( + self, + ) -> Callable[ + [vertex_rag_data_service.ListRagCorporaRequest], + vertex_rag_data_service.ListRagCorporaResponse, + ]: + r"""Return a callable for the list rag corpora method over gRPC. + + Lists RagCorpora in a Location. + + Returns: + Callable[[~.ListRagCorporaRequest], + ~.ListRagCorporaResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_rag_corpora" not in self._stubs: + self._stubs["list_rag_corpora"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/ListRagCorpora", + request_serializer=vertex_rag_data_service.ListRagCorporaRequest.serialize, + response_deserializer=vertex_rag_data_service.ListRagCorporaResponse.deserialize, + ) + return self._stubs["list_rag_corpora"] + + @property + def delete_rag_corpus( + self, + ) -> Callable[ + [vertex_rag_data_service.DeleteRagCorpusRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete rag corpus method over gRPC. + + Deletes a RagCorpus. + + Returns: + Callable[[~.DeleteRagCorpusRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_rag_corpus" not in self._stubs: + self._stubs["delete_rag_corpus"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/DeleteRagCorpus", + request_serializer=vertex_rag_data_service.DeleteRagCorpusRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_rag_corpus"] + + @property + def upload_rag_file( + self, + ) -> Callable[ + [vertex_rag_data_service.UploadRagFileRequest], + vertex_rag_data_service.UploadRagFileResponse, + ]: + r"""Return a callable for the upload rag file method over gRPC. + + Upload a file into a RagCorpus. + + Returns: + Callable[[~.UploadRagFileRequest], + ~.UploadRagFileResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "upload_rag_file" not in self._stubs: + self._stubs["upload_rag_file"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/UploadRagFile", + request_serializer=vertex_rag_data_service.UploadRagFileRequest.serialize, + response_deserializer=vertex_rag_data_service.UploadRagFileResponse.deserialize, + ) + return self._stubs["upload_rag_file"] + + @property + def import_rag_files( + self, + ) -> Callable[ + [vertex_rag_data_service.ImportRagFilesRequest], operations_pb2.Operation + ]: + r"""Return a callable for the import rag files method over gRPC. + + Import files from Google Cloud Storage or Google + Drive into a RagCorpus. + + Returns: + Callable[[~.ImportRagFilesRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "import_rag_files" not in self._stubs: + self._stubs["import_rag_files"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/ImportRagFiles", + request_serializer=vertex_rag_data_service.ImportRagFilesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["import_rag_files"] + + @property + def get_rag_file( + self, + ) -> Callable[[vertex_rag_data_service.GetRagFileRequest], vertex_rag_data.RagFile]: + r"""Return a callable for the get rag file method over gRPC. + + Gets a RagFile. + + Returns: + Callable[[~.GetRagFileRequest], + ~.RagFile]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_rag_file" not in self._stubs: + self._stubs["get_rag_file"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/GetRagFile", + request_serializer=vertex_rag_data_service.GetRagFileRequest.serialize, + response_deserializer=vertex_rag_data.RagFile.deserialize, + ) + return self._stubs["get_rag_file"] + + @property + def list_rag_files( + self, + ) -> Callable[ + [vertex_rag_data_service.ListRagFilesRequest], + vertex_rag_data_service.ListRagFilesResponse, + ]: + r"""Return a callable for the list rag files method over gRPC. + + Lists RagFiles in a RagCorpus. + + Returns: + Callable[[~.ListRagFilesRequest], + ~.ListRagFilesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_rag_files" not in self._stubs: + self._stubs["list_rag_files"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/ListRagFiles", + request_serializer=vertex_rag_data_service.ListRagFilesRequest.serialize, + response_deserializer=vertex_rag_data_service.ListRagFilesResponse.deserialize, + ) + return self._stubs["list_rag_files"] + + @property + def delete_rag_file( + self, + ) -> Callable[ + [vertex_rag_data_service.DeleteRagFileRequest], operations_pb2.Operation + ]: + r"""Return a callable for the delete rag file method over gRPC. + + Deletes a RagFile. + + Returns: + Callable[[~.DeleteRagFileRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_rag_file" not in self._stubs: + self._stubs["delete_rag_file"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/DeleteRagFile", + request_serializer=vertex_rag_data_service.DeleteRagFileRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_rag_file"] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "cancel_operation" not in self._stubs:
+            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/CancelOperation",
+                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["cancel_operation"]
+
+    @property
+    def wait_operation(
+        self,
+    ) -> Callable[[operations_pb2.WaitOperationRequest], None]:
+        r"""Return a callable for the wait_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "wait_operation" not in self._stubs:
+            self._stubs["wait_operation"] = self.grpc_channel.unary_unary(
+                "/google.longrunning.Operations/WaitOperation",
+                request_serializer=operations_pb2.WaitOperationRequest.SerializeToString,
+                response_deserializer=None,
+            )
+        return self._stubs["wait_operation"]
+
+    @property
+    def get_operation(
+        self,
+    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
+        r"""Return a callable for the get_operation method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                    ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("VertexRagDataServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..fb5ef8e06d --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/grpc_asyncio.py @@ -0,0 +1,726 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import VertexRagDataServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import VertexRagDataServiceGrpcTransport + + +class VertexRagDataServiceGrpcAsyncIOTransport(VertexRagDataServiceTransport): + """gRPC AsyncIO backend transport for VertexRagDataService. + + A service for managing user data for RAG. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+ credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. 
+ """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_rag_corpus( + self, + ) -> Callable[ + [vertex_rag_data_service.CreateRagCorpusRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the create rag corpus method over gRPC. + + Creates a RagCorpus. + + Returns: + Callable[[~.CreateRagCorpusRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_rag_corpus" not in self._stubs: + self._stubs["create_rag_corpus"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/CreateRagCorpus", + request_serializer=vertex_rag_data_service.CreateRagCorpusRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_rag_corpus"] + + @property + def get_rag_corpus( + self, + ) -> Callable[ + [vertex_rag_data_service.GetRagCorpusRequest], + Awaitable[vertex_rag_data.RagCorpus], + ]: + r"""Return a callable for the get rag corpus method over gRPC. + + Gets a RagCorpus. + + Returns: + Callable[[~.GetRagCorpusRequest], + Awaitable[~.RagCorpus]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_rag_corpus" not in self._stubs: + self._stubs["get_rag_corpus"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/GetRagCorpus", + request_serializer=vertex_rag_data_service.GetRagCorpusRequest.serialize, + response_deserializer=vertex_rag_data.RagCorpus.deserialize, + ) + return self._stubs["get_rag_corpus"] + + @property + def list_rag_corpora( + self, + ) -> Callable[ + [vertex_rag_data_service.ListRagCorporaRequest], + Awaitable[vertex_rag_data_service.ListRagCorporaResponse], + ]: + r"""Return a callable for the list rag corpora method over gRPC. + + Lists RagCorpora in a Location. + + Returns: + Callable[[~.ListRagCorporaRequest], + Awaitable[~.ListRagCorporaResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_rag_corpora" not in self._stubs: + self._stubs["list_rag_corpora"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/ListRagCorpora", + request_serializer=vertex_rag_data_service.ListRagCorporaRequest.serialize, + response_deserializer=vertex_rag_data_service.ListRagCorporaResponse.deserialize, + ) + return self._stubs["list_rag_corpora"] + + @property + def delete_rag_corpus( + self, + ) -> Callable[ + [vertex_rag_data_service.DeleteRagCorpusRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete rag corpus method over gRPC. + + Deletes a RagCorpus. + + Returns: + Callable[[~.DeleteRagCorpusRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_rag_corpus" not in self._stubs: + self._stubs["delete_rag_corpus"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/DeleteRagCorpus", + request_serializer=vertex_rag_data_service.DeleteRagCorpusRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_rag_corpus"] + + @property + def upload_rag_file( + self, + ) -> Callable[ + [vertex_rag_data_service.UploadRagFileRequest], + Awaitable[vertex_rag_data_service.UploadRagFileResponse], + ]: + r"""Return a callable for the upload rag file method over gRPC. + + Upload a file into a RagCorpus. + + Returns: + Callable[[~.UploadRagFileRequest], + Awaitable[~.UploadRagFileResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "upload_rag_file" not in self._stubs: + self._stubs["upload_rag_file"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/UploadRagFile", + request_serializer=vertex_rag_data_service.UploadRagFileRequest.serialize, + response_deserializer=vertex_rag_data_service.UploadRagFileResponse.deserialize, + ) + return self._stubs["upload_rag_file"] + + @property + def import_rag_files( + self, + ) -> Callable[ + [vertex_rag_data_service.ImportRagFilesRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the import rag files method over gRPC. + + Import files from Google Cloud Storage or Google + Drive into a RagCorpus. + + Returns: + Callable[[~.ImportRagFilesRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "import_rag_files" not in self._stubs: + self._stubs["import_rag_files"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/ImportRagFiles", + request_serializer=vertex_rag_data_service.ImportRagFilesRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["import_rag_files"] + + @property + def get_rag_file( + self, + ) -> Callable[ + [vertex_rag_data_service.GetRagFileRequest], Awaitable[vertex_rag_data.RagFile] + ]: + r"""Return a callable for the get rag file method over gRPC. + + Gets a RagFile. + + Returns: + Callable[[~.GetRagFileRequest], + Awaitable[~.RagFile]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_rag_file" not in self._stubs: + self._stubs["get_rag_file"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/GetRagFile", + request_serializer=vertex_rag_data_service.GetRagFileRequest.serialize, + response_deserializer=vertex_rag_data.RagFile.deserialize, + ) + return self._stubs["get_rag_file"] + + @property + def list_rag_files( + self, + ) -> Callable[ + [vertex_rag_data_service.ListRagFilesRequest], + Awaitable[vertex_rag_data_service.ListRagFilesResponse], + ]: + r"""Return a callable for the list rag files method over gRPC. + + Lists RagFiles in a RagCorpus. + + Returns: + Callable[[~.ListRagFilesRequest], + Awaitable[~.ListRagFilesResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_rag_files" not in self._stubs: + self._stubs["list_rag_files"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/ListRagFiles", + request_serializer=vertex_rag_data_service.ListRagFilesRequest.serialize, + response_deserializer=vertex_rag_data_service.ListRagFilesResponse.deserialize, + ) + return self._stubs["list_rag_files"] + + @property + def delete_rag_file( + self, + ) -> Callable[ + [vertex_rag_data_service.DeleteRagFileRequest], + Awaitable[operations_pb2.Operation], + ]: + r"""Return a callable for the delete rag file method over gRPC. + + Deletes a RagFile. + + Returns: + Callable[[~.DeleteRagFileRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_rag_file" not in self._stubs: + self._stubs["delete_rag_file"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagDataService/DeleteRagFile", + request_serializer=vertex_rag_data_service.DeleteRagFileRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["delete_rag_file"] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("VertexRagDataServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest.py new file mode 100644 index 0000000000..9ac548f5db --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_data_service/transports/rest.py @@ -0,0 +1,6145 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data_service +from google.longrunning import operations_pb2 # type: ignore + +from .base import ( + VertexRagDataServiceTransport, + DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, +) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class VertexRagDataServiceRestInterceptor: + """Interceptor for VertexRagDataService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. 
+ Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the VertexRagDataServiceRestTransport. + + .. code-block:: python + class MyCustomVertexRagDataServiceInterceptor(VertexRagDataServiceRestInterceptor): + def pre_create_rag_corpus(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_rag_corpus(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_rag_corpus(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_rag_corpus(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_rag_file(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_delete_rag_file(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_rag_corpus(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_rag_corpus(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_rag_file(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_rag_file(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_import_rag_files(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_import_rag_files(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_rag_corpora(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def 
post_list_rag_corpora(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_rag_files(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_rag_files(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_upload_rag_file(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_upload_rag_file(self, response): + logging.log(f"Received response: {response}") + return response + + transport = VertexRagDataServiceRestTransport(interceptor=MyCustomVertexRagDataServiceInterceptor()) + client = VertexRagDataServiceClient(transport=transport) + + + """ + + def pre_create_rag_corpus( + self, + request: vertex_rag_data_service.CreateRagCorpusRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + vertex_rag_data_service.CreateRagCorpusRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for create_rag_corpus + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_create_rag_corpus( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for create_rag_corpus + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_delete_rag_corpus( + self, + request: vertex_rag_data_service.DeleteRagCorpusRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + vertex_rag_data_service.DeleteRagCorpusRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for delete_rag_corpus + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. 
+ """ + return request, metadata + + def post_delete_rag_corpus( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_rag_corpus + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_delete_rag_file( + self, + request: vertex_rag_data_service.DeleteRagFileRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[vertex_rag_data_service.DeleteRagFileRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_rag_file + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_delete_rag_file( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for delete_rag_file + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_get_rag_corpus( + self, + request: vertex_rag_data_service.GetRagCorpusRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[vertex_rag_data_service.GetRagCorpusRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_rag_corpus + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_get_rag_corpus( + self, response: vertex_rag_data.RagCorpus + ) -> vertex_rag_data.RagCorpus: + """Post-rpc interceptor for get_rag_corpus + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. 
+ """ + return response + + def pre_get_rag_file( + self, + request: vertex_rag_data_service.GetRagFileRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[vertex_rag_data_service.GetRagFileRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_rag_file + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_get_rag_file( + self, response: vertex_rag_data.RagFile + ) -> vertex_rag_data.RagFile: + """Post-rpc interceptor for get_rag_file + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_import_rag_files( + self, + request: vertex_rag_data_service.ImportRagFilesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + vertex_rag_data_service.ImportRagFilesRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for import_rag_files + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_import_rag_files( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for import_rag_files + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_list_rag_corpora( + self, + request: vertex_rag_data_service.ListRagCorporaRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[ + vertex_rag_data_service.ListRagCorporaRequest, Sequence[Tuple[str, str]] + ]: + """Pre-rpc interceptor for list_rag_corpora + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. 
+ """ + return request, metadata + + def post_list_rag_corpora( + self, response: vertex_rag_data_service.ListRagCorporaResponse + ) -> vertex_rag_data_service.ListRagCorporaResponse: + """Post-rpc interceptor for list_rag_corpora + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_list_rag_files( + self, + request: vertex_rag_data_service.ListRagFilesRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[vertex_rag_data_service.ListRagFilesRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_rag_files + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_list_rag_files( + self, response: vertex_rag_data_service.ListRagFilesResponse + ) -> vertex_rag_data_service.ListRagFilesResponse: + """Post-rpc interceptor for list_rag_files + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_upload_rag_file( + self, + request: vertex_rag_data_service.UploadRagFileRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[vertex_rag_data_service.UploadRagFileRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for upload_rag_file + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_upload_rag_file( + self, response: vertex_rag_data_service.UploadRagFileResponse + ) -> vertex_rag_data_service.UploadRagFileResponse: + """Post-rpc interceptor for upload_rag_file + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. 
+ """ + return response + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. 
+ """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. 
+ """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. 
+ """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. + """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.WaitOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagDataService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the VertexRagDataService server but before + it is returned to user code. 
+ """ + return response + + +@dataclasses.dataclass +class VertexRagDataServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: VertexRagDataServiceRestInterceptor + + +class VertexRagDataServiceRestTransport(VertexRagDataServiceTransport): + """REST backend transport for VertexRagDataService. + + A service for managing user data for RAG. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via an issue in this + library's source repository. Thank you! + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[VertexRagDataServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via a GitHub issue in + this library's repository. Thank you! + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you are developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or VertexRagDataServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def operations_client(self) -> operations_v1.AbstractOperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Only create a new client if we do not already have one.
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + "google.longrunning.Operations.CancelOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:cancel", + }, 
+ { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + 
"uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ], + "google.longrunning.Operations.DeleteOperation": [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + 
"uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", 
+ }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ], + "google.longrunning.Operations.GetOperation": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, 
+ { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ], + "google.longrunning.Operations.ListOperations": [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + 
"method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", + }, + ], + "google.longrunning.Operations.WaitOperation": [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + 
"uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + 
"method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1beta1", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) + + # Return the client from cache. 
+ return self._operations_client + + class _CreateRagCorpus(VertexRagDataServiceRestStub): + def __hash__(self): + return hash("CreateRagCorpus") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: vertex_rag_data_service.CreateRagCorpusRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the create rag corpus method over HTTP. + + Args: + request (~.vertex_rag_data_service.CreateRagCorpusRequest): + The request object. Request message for + [VertexRagDataService.CreateRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.CreateRagCorpus]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{parent=projects/*/locations/*}/ragCorpora", + "body": "rag_corpus", + }, + ] + request, metadata = self._interceptor.pre_create_rag_corpus( + request, metadata + ) + pb_request = vertex_rag_data_service.CreateRagCorpusRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_rag_corpus(resp) + return resp + + class _DeleteRagCorpus(VertexRagDataServiceRestStub): + def __hash__(self): + return hash("DeleteRagCorpus") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: vertex_rag_data_service.DeleteRagCorpusRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete rag corpus method over HTTP. + + Args: + request (~.vertex_rag_data_service.DeleteRagCorpusRequest): + The request object. Request message for + [VertexRagDataService.DeleteRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.DeleteRagCorpus]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_rag_corpus( + request, metadata + ) + pb_request = vertex_rag_data_service.DeleteRagCorpusRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_rag_corpus(resp) + return resp + + class _DeleteRagFile(VertexRagDataServiceRestStub): + def __hash__(self): + return hash("DeleteRagFile") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: vertex_rag_data_service.DeleteRagFileRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the delete rag file method over HTTP. + + Args: + request (~.vertex_rag_data_service.DeleteRagFileRequest): + The request object. Request message for + [VertexRagDataService.DeleteRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.DeleteRagFile]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}", + }, + ] + request, metadata = self._interceptor.pre_delete_rag_file(request, metadata) + pb_request = vertex_rag_data_service.DeleteRagFileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_delete_rag_file(resp) + return resp + + class _GetRagCorpus(VertexRagDataServiceRestStub): + def __hash__(self): + return hash("GetRagCorpus") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: vertex_rag_data_service.GetRagCorpusRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_data.RagCorpus: + r"""Call the get rag corpus method over HTTP. + + Args: + request (~.vertex_rag_data_service.GetRagCorpusRequest): + The request object. Request message for + [VertexRagDataService.GetRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.GetRagCorpus] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.vertex_rag_data.RagCorpus: + A RagCorpus is a RagFile container + and a project can have multiple + RagCorpora. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}", + }, + ] + request, metadata = self._interceptor.pre_get_rag_corpus(request, metadata) + pb_request = vertex_rag_data_service.GetRagCorpusRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = vertex_rag_data.RagCorpus() + pb_resp = vertex_rag_data.RagCorpus.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_rag_corpus(resp) + return resp + + class _GetRagFile(VertexRagDataServiceRestStub): + def __hash__(self): + return hash("GetRagFile") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: vertex_rag_data_service.GetRagFileRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_data.RagFile: + r"""Call the get rag file method over HTTP. + + Args: + request (~.vertex_rag_data_service.GetRagFileRequest): + The request object. Request message for + [VertexRagDataService.GetRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.GetRagFile] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.vertex_rag_data.RagFile: + A RagFile contains user data for + chunking, embedding and indexing. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}", + }, + ] + request, metadata = self._interceptor.pre_get_rag_file(request, metadata) + pb_request = vertex_rag_data_service.GetRagFileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = vertex_rag_data.RagFile() + pb_resp = vertex_rag_data.RagFile.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_rag_file(resp) + return resp + + class _ImportRagFiles(VertexRagDataServiceRestStub): + def __hash__(self): + return hash("ImportRagFiles") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: vertex_rag_data_service.ImportRagFilesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the import rag files method over HTTP. + + Args: + request (~.vertex_rag_data_service.ImportRagFilesRequest): + The request object. Request message for + [VertexRagDataService.ImportRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{parent=projects/*/locations/*/ragCorpora/*}/ragFiles:import", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_import_rag_files( + request, metadata + ) + pb_request = vertex_rag_data_service.ImportRagFilesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_import_rag_files(resp) + return resp + + class _ListRagCorpora(VertexRagDataServiceRestStub): + def __hash__(self): + return hash("ListRagCorpora") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: vertex_rag_data_service.ListRagCorporaRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_data_service.ListRagCorporaResponse: + r"""Call the list rag corpora method over HTTP. + + Args: + request (~.vertex_rag_data_service.ListRagCorporaRequest): + The request object. Request message for + [VertexRagDataService.ListRagCorpora][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagCorpora]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.vertex_rag_data_service.ListRagCorporaResponse: + Response message for + [VertexRagDataService.ListRagCorpora][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagCorpora]. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{parent=projects/*/locations/*}/ragCorpora", + }, + ] + request, metadata = self._interceptor.pre_list_rag_corpora( + request, metadata + ) + pb_request = vertex_rag_data_service.ListRagCorporaRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = vertex_rag_data_service.ListRagCorporaResponse() + pb_resp = vertex_rag_data_service.ListRagCorporaResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_rag_corpora(resp) + return resp + + class _ListRagFiles(VertexRagDataServiceRestStub): + def __hash__(self): + return hash("ListRagFiles") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: vertex_rag_data_service.ListRagFilesRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_data_service.ListRagFilesResponse: + r"""Call the list rag files method over HTTP. + + Args: + request (~.vertex_rag_data_service.ListRagFilesRequest): + The request object. Request message for + [VertexRagDataService.ListRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagFiles]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.vertex_rag_data_service.ListRagFilesResponse: + Response message for + [VertexRagDataService.ListRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagFiles]. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/v1beta1/{parent=projects/*/locations/*/ragCorpora/*}/ragFiles", + }, + ] + request, metadata = self._interceptor.pre_list_rag_files(request, metadata) + pb_request = vertex_rag_data_service.ListRagFilesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = vertex_rag_data_service.ListRagFilesResponse() + pb_resp = vertex_rag_data_service.ListRagFilesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_rag_files(resp) + return resp + + class _UploadRagFile(VertexRagDataServiceRestStub): + def __hash__(self): + return hash("UploadRagFile") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: vertex_rag_data_service.UploadRagFileRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_data_service.UploadRagFileResponse: + r"""Call the upload rag file method over HTTP. + + Args: + request (~.vertex_rag_data_service.UploadRagFileRequest): + The request object. Request message for + [VertexRagDataService.UploadRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.vertex_rag_data_service.UploadRagFileResponse: + Response message for + [VertexRagDataService.UploadRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile]. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{parent=projects/*/locations/*/ragCorpora/*}/ragFiles:upload", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_upload_rag_file(request, metadata) + pb_request = vertex_rag_data_service.UploadRagFileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = vertex_rag_data_service.UploadRagFileResponse() + pb_resp = vertex_rag_data_service.UploadRagFileResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_upload_rag_file(resp) + return resp + + @property + def create_rag_corpus( + self, + ) -> Callable[ + [vertex_rag_data_service.CreateRagCorpusRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CreateRagCorpus(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_rag_corpus( + self, + ) -> Callable[ + [vertex_rag_data_service.DeleteRagCorpusRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteRagCorpus(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_rag_file( + self, + ) -> Callable[ + [vertex_rag_data_service.DeleteRagFileRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteRagFile(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_rag_corpus( + self, + ) -> Callable[ + [vertex_rag_data_service.GetRagCorpusRequest], vertex_rag_data.RagCorpus + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetRagCorpus(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_rag_file( + self, + ) -> Callable[[vertex_rag_data_service.GetRagFileRequest], vertex_rag_data.RagFile]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetRagFile(self._session, self._host, self._interceptor) # type: ignore + + @property + def import_rag_files( + self, + ) -> Callable[ + [vertex_rag_data_service.ImportRagFilesRequest], operations_pb2.Operation + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ImportRagFiles(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_rag_corpora( + self, + ) -> Callable[ + [vertex_rag_data_service.ListRagCorporaRequest], + vertex_rag_data_service.ListRagCorporaResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListRagCorpora(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_rag_files( + self, + ) -> Callable[ + [vertex_rag_data_service.ListRagFilesRequest], + vertex_rag_data_service.ListRagFilesResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListRagFiles(self._session, self._host, self._interceptor) # type: ignore + + @property + def upload_rag_file( + self, + ) -> Callable[ + [vertex_rag_data_service.UploadRagFileRequest], + vertex_rag_data_service.UploadRagFileResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UploadRagFile(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation(VertexRagDataServiceRestStub): + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.Location: Response from GetLocation method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_location(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.Location() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_location(resp) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations(VertexRagDataServiceRestStub): + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + + r"""Call the list locations method over HTTP. 
+ + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*}/locations", + }, + ] + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_locations(resp) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy(VertexRagDataServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the get iam policy method over HTTP. + + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + ] + + request, metadata = self._interceptor.pre_get_iam_policy(request, 
metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy(VertexRagDataServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the set iam policy method over HTTP. + + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + ] + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + request_kwargs = 
json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions(VertexRagDataServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + ] + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, 
**request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(VertexRagDataServiceRestStub): + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:cancel", + }, 
+ { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + 
"uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation(VertexRagDataServiceRestStub): + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + 
}, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(VertexRagDataServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { 
+ "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(VertexRagDataServiceRestStub): + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/exampleStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation(VertexRagDataServiceRestStub): + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the wait operation method over HTTP. 
+ + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": 
"post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + 
headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_wait_operation(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("VertexRagDataServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/__init__.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/__init__.py new file mode 100644 index 0000000000..7ec3aa5bd0 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import VertexRagServiceClient +from .async_client import VertexRagServiceAsyncClient + +__all__ = ( + "VertexRagServiceClient", + "VertexRagServiceAsyncClient", +) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/async_client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/async_client.py new file mode 100644 index 0000000000..61bfe8e1d6 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/async_client.py @@ -0,0 +1,1110 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry_async as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore + +from google.cloud.aiplatform_v1beta1.types import vertex_rag_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .transports.base import VertexRagServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import VertexRagServiceGrpcAsyncIOTransport +from .client import VertexRagServiceClient + + +class VertexRagServiceAsyncClient: + """A service for retrieving relevant contexts.""" + + _client: VertexRagServiceClient + + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
+ DEFAULT_ENDPOINT = VertexRagServiceClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = VertexRagServiceClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = VertexRagServiceClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = VertexRagServiceClient._DEFAULT_UNIVERSE + + common_billing_account_path = staticmethod( + VertexRagServiceClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + VertexRagServiceClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(VertexRagServiceClient.common_folder_path) + parse_common_folder_path = staticmethod( + VertexRagServiceClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + VertexRagServiceClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + VertexRagServiceClient.parse_common_organization_path + ) + common_project_path = staticmethod(VertexRagServiceClient.common_project_path) + parse_common_project_path = staticmethod( + VertexRagServiceClient.parse_common_project_path + ) + common_location_path = staticmethod(VertexRagServiceClient.common_location_path) + parse_common_location_path = staticmethod( + VertexRagServiceClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VertexRagServiceAsyncClient: The constructed client. + """ + return VertexRagServiceClient.from_service_account_info.__func__(VertexRagServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. 
+ + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VertexRagServiceAsyncClient: The constructed client. + """ + return VertexRagServiceClient.from_service_account_file.__func__(VertexRagServiceAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. 
+ """ + return VertexRagServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> VertexRagServiceTransport: + """Returns the transport used by the client instance. + + Returns: + VertexRagServiceTransport: The transport used by the client instance. + """ + return self._client.transport + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + + get_transport_class = functools.partial( + type(VertexRagServiceClient).get_transport_class, type(VertexRagServiceClient) + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, VertexRagServiceTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the vertex rag service async client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.VertexRagServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. 
The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. 
+ """ + self._client = VertexRagServiceClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def retrieve_contexts( + self, + request: Optional[ + Union[vertex_rag_service.RetrieveContextsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + query: Optional[vertex_rag_service.RagQuery] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_service.RetrieveContextsResponse: + r"""Retrieves relevant contexts for a query. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + async def sample_retrieve_contexts(): + # Create a client + client = aiplatform_v1beta1.VertexRagServiceAsyncClient() + + # Initialize request argument(s) + vertex_rag_store = aiplatform_v1beta1.VertexRagStore() + vertex_rag_store.rag_corpora = ['rag_corpora_value1', 'rag_corpora_value2'] + + query = aiplatform_v1beta1.RagQuery() + query.text = "text_value" + + request = aiplatform_v1beta1.RetrieveContextsRequest( + vertex_rag_store=vertex_rag_store, + parent="parent_value", + query=query, + ) + + # Make the request + response = await client.retrieve_contexts(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.aiplatform_v1beta1.types.RetrieveContextsRequest, dict]]): + The request object. Request message for + [VertexRagService.RetrieveContexts][google.cloud.aiplatform.v1beta1.VertexRagService.RetrieveContexts]. 
+ parent (:class:`str`): + Required. The resource name of the Location from which + to retrieve RagContexts. The users must have permission + to make a call in the project. Format: + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (:class:`google.cloud.aiplatform_v1beta1.types.RagQuery`): + Required. Single RAG retrieve query. + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.RetrieveContextsResponse: + Response message for + [VertexRagService.RetrieveContexts][google.cloud.aiplatform.v1beta1.VertexRagService.RetrieveContexts]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, query]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = vertex_rag_service.RetrieveContextsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if query is not None: + request.query = query + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.retrieve_contexts, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+        For a description of IAM and its features, see the `IAM
+        developer's
+        guide <https://cloud.google.com/iam/docs>`__.
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. 
+ self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + async def __aenter__(self) -> "VertexRagServiceAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("VertexRagServiceAsyncClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/client.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/client.py new file mode 100644 index 0000000000..4280d980bf --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/client.py @@ -0,0 +1,1510 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) +import warnings + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + +from google.cloud.aiplatform_v1beta1.types import vertex_rag_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .transports.base import VertexRagServiceTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import VertexRagServiceGrpcTransport +from .transports.grpc_asyncio import VertexRagServiceGrpcAsyncIOTransport +from .transports.rest import VertexRagServiceRestTransport + + +class VertexRagServiceClientMeta(type): + """Metaclass for the VertexRagService client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+ DEFAULT_ENDPOINT = "aiplatform.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + _DEFAULT_ENDPOINT_TEMPLATE = "aiplatform.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VertexRagServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VertexRagServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file(filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> VertexRagServiceTransport: + """Returns the transport used by the client instance. + + Returns: + VertexRagServiceTransport: The transport used by the client + instance. 
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
Use the api_endpoint property instead.", + DeprecationWarning, + ) + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. 
+ """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + @staticmethod + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + @staticmethod + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". 
+ + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = VertexRagServiceClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = VertexRagServiceClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = VertexRagServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = VertexRagServiceClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. 
+ + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + + default_universe = VertexRagServiceClient._DEFAULT_UNIVERSE + credentials_universe = getattr(credentials, "universe_domain", default_universe) + + if client_universe != credentials_universe: + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or VertexRagServiceClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, VertexRagServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the vertex rag service client. 
+ + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, VertexRagServiceTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + NOTE: "rest" transport functionality is currently in a + beta state (preview). We welcome your feedback via an + issue in this library's source repository. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide a client certificate for mTLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. 
+ + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) + + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = VertexRagServiceClient._read_environment_variables() + self._client_cert_source = VertexRagServiceClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = VertexRagServiceClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env + ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + transport_provided = isinstance(transport, VertexRagServiceTransport) + if transport_provided: + # transport is a VertexRagServiceTransport instance. 
+ if credentials or self._client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if self._client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = cast(VertexRagServiceTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or VertexRagServiceClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(cast(str, transport)) + self._transport = Transport( + credentials=credentials, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=self._client_options.api_audience, + ) + + def retrieve_contexts( + self, + request: Optional[ + Union[vertex_rag_service.RetrieveContextsRequest, dict] + ] = None, + *, + parent: Optional[str] = None, + query: Optional[vertex_rag_service.RagQuery] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_service.RetrieveContextsResponse: + r"""Retrieves relevant contexts for a query. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. 
+ # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import aiplatform_v1beta1 + + def sample_retrieve_contexts(): + # Create a client + client = aiplatform_v1beta1.VertexRagServiceClient() + + # Initialize request argument(s) + vertex_rag_store = aiplatform_v1beta1.VertexRagStore() + vertex_rag_store.rag_corpora = ['rag_corpora_value1', 'rag_corpora_value2'] + + query = aiplatform_v1beta1.RagQuery() + query.text = "text_value" + + request = aiplatform_v1beta1.RetrieveContextsRequest( + vertex_rag_store=vertex_rag_store, + parent="parent_value", + query=query, + ) + + # Make the request + response = client.retrieve_contexts(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.aiplatform_v1beta1.types.RetrieveContextsRequest, dict]): + The request object. Request message for + [VertexRagService.RetrieveContexts][google.cloud.aiplatform.v1beta1.VertexRagService.RetrieveContexts]. + parent (str): + Required. The resource name of the Location from which + to retrieve RagContexts. The users must have permission + to make a call in the project. Format: + ``projects/{project}/locations/{location}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + query (google.cloud.aiplatform_v1beta1.types.RagQuery): + Required. Single RAG retrieve query. + This corresponds to the ``query`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.aiplatform_v1beta1.types.RetrieveContextsResponse: + Response message for + [VertexRagService.RetrieveContexts][google.cloud.aiplatform.v1beta1.VertexRagService.RetrieveContexts]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, query]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a vertex_rag_service.RetrieveContextsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, vertex_rag_service.RetrieveContextsRequest): + request = vertex_rag_service.RetrieveContextsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if query is not None: + request.query = query + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.retrieve_contexts] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def __enter__(self) -> "VertexRagServiceClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + def list_operations( + self, + request: Optional[operations_pb2.ListOperationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + r"""Lists operations that match the specified filter in the request. + + Args: + request (:class:`~.operations_pb2.ListOperationsRequest`): + The request object. Request message for + `ListOperations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.ListOperationsResponse: + Response message for ``ListOperations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.ListOperationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_operations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation( + self, + request: Optional[operations_pb2.GetOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Gets the latest state of a long-running operation. + + Args: + request (:class:`~.operations_pb2.GetOperationRequest`): + The request object. Request message for + `GetOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.GetOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_operation( + self, + request: Optional[operations_pb2.DeleteOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a long-running operation. + + This method indicates that the client is no longer interested + in the operation result. It does not cancel the operation. + If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.DeleteOperationRequest`): + The request object. Request message for + `DeleteOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.DeleteOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.delete_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def cancel_operation( + self, + request: Optional[operations_pb2.CancelOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Starts asynchronous cancellation on a long-running operation. + + The server makes a best effort to cancel the operation, but success + is not guaranteed. If the server doesn't support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.CancelOperationRequest`): + The request object. Request message for + `CancelOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + None + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.CancelOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.cancel_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def wait_operation( + self, + request: Optional[operations_pb2.WaitOperationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Waits until the specified long-running operation is done or reaches at most + a specified timeout, returning the latest state. + + If the operation is already done, the latest state is immediately returned. + If the timeout specified is greater than the default HTTP/RPC timeout, the HTTP/RPC + timeout is used. If the server does not support this method, it returns + `google.rpc.Code.UNIMPLEMENTED`. + + Args: + request (:class:`~.operations_pb2.WaitOperationRequest`): + The request object. Request message for + `WaitOperation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.operations_pb2.Operation: + An ``Operation`` object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = operations_pb2.WaitOperationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.wait_operation, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. 
+ self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy( + self, + request: Optional[iam_policy_pb2.SetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the IAM access control policy on the specified function. + + Replaces any existing policy. + + Args: + request (:class:`~.iam_policy_pb2.SetIamPolicyRequest`): + The request object. Request message for `SetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.set_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy( + self, + request: Optional[iam_policy_pb2.GetIamPolicyRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the IAM access control policy for a function. + + Returns an empty policy if the function exists and does not have a + policy set. + + Args: + request (:class:`~.iam_policy_pb2.GetIamPolicyRequest`): + The request object. Request message for `GetIamPolicy` + method. + retry (google.api_core.retry.Retry): Designation of what errors, if + any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.policy_pb2.Policy: + Defines an Identity and Access Management (IAM) policy. + It is used to specify access control policies for Cloud + Platform resources. + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions (defined by + IAM or configured by users). A ``binding`` can + optionally specify a ``condition``, which is a logic + expression that further constrains the role binding + based on attributes about the request and/or target + resource. 
+ + **JSON Example** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": ["user:eve@example.com"], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ] + } + + **YAML Example** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + + For a description of IAM and its features, see the `IAM + developer's + guide `__. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_iam_policy, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions( + self, + request: Optional[iam_policy_pb2.TestIamPermissionsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Tests the specified IAM permissions against the IAM access control + policy for a function. + + If the function does not exist, this will return an empty set + of permissions, not a NOT_FOUND error. + + Args: + request (:class:`~.iam_policy_pb2.TestIamPermissionsRequest`): + The request object. Request message for + `TestIamPermissions` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + # Create or coerce a protobuf request object. + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.test_iam_permissions, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_location( + self, + request: Optional[locations_pb2.GetLocationRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + r"""Gets information about a location. + + Args: + request (:class:`~.location_pb2.GetLocationRequest`): + The request object. Request message for + `GetLocation` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.Location: + Location object. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.GetLocationRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.get_location, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_locations( + self, + request: Optional[locations_pb2.ListLocationsRequest] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + r"""Lists information about the supported locations for this service. + + Args: + request (:class:`~.location_pb2.ListLocationsRequest`): + The request object. Request message for + `ListLocations` method. + retry (google.api_core.retry.Retry): Designation of what errors, + if any, should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + Returns: + ~.location_pb2.ListLocationsResponse: + Response message for ``ListLocations`` method. + """ + # Create or coerce a protobuf request object. + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = locations_pb2.ListLocationsRequest(**request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method.wrap_method( + self._transport.list_locations, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("VertexRagServiceClient",) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/__init__.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/__init__.py new file mode 100644 index 0000000000..db8b8ea3d0 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import VertexRagServiceTransport +from .grpc import VertexRagServiceGrpcTransport +from .grpc_asyncio import VertexRagServiceGrpcAsyncIOTransport +from .rest import VertexRagServiceRestTransport +from .rest import VertexRagServiceRestInterceptor + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[VertexRagServiceTransport]] +_transport_registry["grpc"] = VertexRagServiceGrpcTransport +_transport_registry["grpc_asyncio"] = VertexRagServiceGrpcAsyncIOTransport +_transport_registry["rest"] = VertexRagServiceRestTransport + +__all__ = ( + "VertexRagServiceTransport", + "VertexRagServiceGrpcTransport", + "VertexRagServiceGrpcAsyncIOTransport", + "VertexRagServiceRestTransport", + "VertexRagServiceRestInterceptor", +) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/base.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/base.py new file mode 100644 index 0000000000..154482f3ee --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/base.py @@ -0,0 +1,260 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.aiplatform_v1beta1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.aiplatform_v1beta1.types import vertex_rag_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class VertexRagServiceTransport(abc.ABC): + """Abstract transport class for VertexRagService.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "aiplatform.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. 
+ self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + @property + def host(self): + return self._host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.retrieve_contexts: gapic_v1.method.wrap_method( + self.retrieve_contexts, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def retrieve_contexts( + self, + ) -> Callable[ + [vertex_rag_service.RetrieveContextsRequest], + Union[ + vertex_rag_service.RetrieveContextsResponse, + Awaitable[vertex_rag_service.RetrieveContextsResponse], + ], + ]: + raise NotImplementedError() + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], + Union[ + operations_pb2.ListOperationsResponse, + Awaitable[operations_pb2.ListOperationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_operation( + self, + ) -> Callable[ + [operations_pb2.GetOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None,]: + raise NotImplementedError() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None,]: + raise NotImplementedError() + + @property + def wait_operation( + self, + ) -> Callable[ + [operations_pb2.WaitOperationRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def set_iam_policy( + self, + ) -> Callable[ + 
[iam_policy_pb2.SetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def get_iam_policy( + self, + ) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]], + ]: + raise NotImplementedError() + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse], + ], + ]: + raise NotImplementedError() + + @property + def get_location( + self, + ) -> Callable[ + [locations_pb2.GetLocationRequest], + Union[locations_pb2.Location, Awaitable[locations_pb2.Location]], + ]: + raise NotImplementedError() + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], + Union[ + locations_pb2.ListLocationsResponse, + Awaitable[locations_pb2.ListLocationsResponse], + ], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("VertexRagServiceTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc.py new file mode 100644 index 0000000000..b184da4455 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc.py @@ -0,0 +1,476 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.aiplatform_v1beta1.types import vertex_rag_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import VertexRagServiceTransport, DEFAULT_CLIENT_INFO + + +class VertexRagServiceGrpcTransport(VertexRagServiceTransport): + """gRPC backend transport for VertexRagService. + + A service for retrieving relevant contexts. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. 
A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def retrieve_contexts( + self, + ) -> Callable[ + [vertex_rag_service.RetrieveContextsRequest], + vertex_rag_service.RetrieveContextsResponse, + ]: + r"""Return a callable for the retrieve contexts method over gRPC. + + Retrieves relevant contexts for a query. + + Returns: + Callable[[~.RetrieveContextsRequest], + ~.RetrieveContextsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "retrieve_contexts" not in self._stubs: + self._stubs["retrieve_contexts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagService/RetrieveContexts", + request_serializer=vertex_rag_service.RetrieveContextsRequest.serialize, + response_deserializer=vertex_rag_service.RetrieveContextsResponse.deserialize, + ) + return self._stubs["retrieve_contexts"] + + def close(self): + self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "list_locations" not in self._stubs: + self._stubs["list_locations"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/ListLocations", + request_serializer=locations_pb2.ListLocationsRequest.SerializeToString, + response_deserializer=locations_pb2.ListLocationsResponse.FromString, + ) + return self._stubs["list_locations"] + + @property + def get_location( + self, + ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_location" not in self._stubs: + self._stubs["get_location"] = self.grpc_channel.unary_unary( + "/google.cloud.location.Locations/GetLocation", + request_serializer=locations_pb2.GetLocationRequest.SerializeToString, + response_deserializer=locations_pb2.Location.FromString, + ) + return self._stubs["get_location"] + + @property + def set_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + Sets the IAM access control policy on the specified + function. Replaces any existing policy. + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("VertexRagServiceGrpcTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc_asyncio.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc_asyncio.py new file mode 100644 index 0000000000..5aa450bc9c --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/grpc_asyncio.py @@ -0,0 +1,475 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.aiplatform_v1beta1.types import vertex_rag_service +from google.cloud.location import locations_pb2 # type: ignore +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from .base import VertexRagServiceTransport, DEFAULT_CLIENT_INFO +from .grpc import VertexRagServiceGrpcTransport + + +class VertexRagServiceGrpcAsyncIOTransport(VertexRagServiceTransport): + """gRPC AsyncIO backend transport for VertexRagService. + + A service for retrieving relevant contexts. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. 
+ if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def retrieve_contexts( + self, + ) -> Callable[ + [vertex_rag_service.RetrieveContextsRequest], + Awaitable[vertex_rag_service.RetrieveContextsResponse], + ]: + r"""Return a callable for the retrieve contexts method over gRPC. + + Retrieves relevant contexts for a query. 
+ + Returns: + Callable[[~.RetrieveContextsRequest], + Awaitable[~.RetrieveContextsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "retrieve_contexts" not in self._stubs: + self._stubs["retrieve_contexts"] = self.grpc_channel.unary_unary( + "/google.cloud.aiplatform.v1beta1.VertexRagService/RetrieveContexts", + request_serializer=vertex_rag_service.RetrieveContextsRequest.serialize, + response_deserializer=vertex_rag_service.RetrieveContextsResponse.deserialize, + ) + return self._stubs["retrieve_contexts"] + + def close(self): + return self.grpc_channel.close() + + @property + def delete_operation( + self, + ) -> Callable[[operations_pb2.DeleteOperationRequest], None]: + r"""Return a callable for the delete_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/DeleteOperation", + request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["delete_operation"] + + @property + def cancel_operation( + self, + ) -> Callable[[operations_pb2.CancelOperationRequest], None]: + r"""Return a callable for the cancel_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "cancel_operation" not in self._stubs: + self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/CancelOperation", + request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["cancel_operation"] + + @property + def wait_operation( + self, + ) -> Callable[[operations_pb2.WaitOperationRequest], None]: + r"""Return a callable for the wait_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "delete_operation" not in self._stubs: + self._stubs["wait_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/WaitOperation", + request_serializer=operations_pb2.WaitOperationRequest.SerializeToString, + response_deserializer=None, + ) + return self._stubs["wait_operation"] + + @property + def get_operation( + self, + ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]: + r"""Return a callable for the get_operation method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_operation" not in self._stubs: + self._stubs["get_operation"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/GetOperation", + request_serializer=operations_pb2.GetOperationRequest.SerializeToString, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["get_operation"] + + @property + def list_operations( + self, + ) -> Callable[ + [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse + ]: + r"""Return a callable for the list_operations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "list_operations" not in self._stubs: + self._stubs["list_operations"] = self.grpc_channel.unary_unary( + "/google.longrunning.Operations/ListOperations", + request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, + response_deserializer=operations_pb2.ListOperationsResponse.FromString, + ) + return self._stubs["list_operations"] + + @property + def list_locations( + self, + ) -> Callable[ + [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse + ]: + r"""Return a callable for the list locations method over gRPC.""" + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if "list_locations" not in self._stubs:
+            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/ListLocations",
+                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
+                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
+            )
+        return self._stubs["list_locations"]
+
+    @property
+    def get_location(
+        self,
+    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
+        r"""Return a callable for the get location method over gRPC."""
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if "get_location" not in self._stubs:
+            self._stubs["get_location"] = self.grpc_channel.unary_unary(
+                "/google.cloud.location.Locations/GetLocation",
+                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
+                response_deserializer=locations_pb2.Location.FromString,
+            )
+        return self._stubs["get_location"]
+
+    @property
+    def set_iam_policy(
+        self,
+    ) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], policy_pb2.Policy]:
+        r"""Return a callable for the set iam policy method over gRPC.
+        Sets the IAM access control policy on the specified
+        function. Replaces any existing policy.
+        Returns:
+            Callable[[~.SetIamPolicyRequest],
+                ~.Policy]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if "set_iam_policy" not in self._stubs: + self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/SetIamPolicy", + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["set_iam_policy"] + + @property + def get_iam_policy( + self, + ) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + Gets the IAM access control policy for a function. + Returns an empty policy if the function exists and does + not have a policy set. + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_iam_policy" not in self._stubs: + self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/GetIamPolicy", + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs["get_iam_policy"] + + @property + def test_iam_permissions( + self, + ) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse, + ]: + r"""Return a callable for the test iam permissions method over gRPC. + Tests the specified permissions against the IAM access control + policy for a function. If the function does not exist, this will + return an empty set of permissions, not a NOT_FOUND error. + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "test_iam_permissions" not in self._stubs: + self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + "/google.iam.v1.IAMPolicy/TestIamPermissions", + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs["test_iam_permissions"] + + +__all__ = ("VertexRagServiceGrpcAsyncIOTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/rest.py new file mode 100644 index 0000000000..dff2d3b314 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/services/vertex_rag_service/transports/rest.py @@ -0,0 +1,3227 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.cloud.location import locations_pb2 # type: ignore +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object, None] # type: ignore + + +from google.cloud.aiplatform_v1beta1.types import vertex_rag_service +from google.longrunning import operations_pb2 # type: ignore + +from .base import ( + VertexRagServiceTransport, + DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO, +) + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class VertexRagServiceRestInterceptor: + """Interceptor for VertexRagService. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. 
+ Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the VertexRagServiceRestTransport. + + .. code-block:: python + class MyCustomVertexRagServiceInterceptor(VertexRagServiceRestInterceptor): + def pre_retrieve_contexts(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_retrieve_contexts(self, response): + logging.log(f"Received response: {response}") + return response + + transport = VertexRagServiceRestTransport(interceptor=MyCustomVertexRagServiceInterceptor()) + client = VertexRagServiceClient(transport=transport) + + + """ + + def pre_retrieve_contexts( + self, + request: vertex_rag_service.RetrieveContextsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[vertex_rag_service.RetrieveContextsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for retrieve_contexts + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagService server. + """ + return request, metadata + + def post_retrieve_contexts( + self, response: vertex_rag_service.RetrieveContextsResponse + ) -> vertex_rag_service.RetrieveContextsResponse: + """Post-rpc interceptor for retrieve_contexts + + Override in a subclass to manipulate the response + after it is returned by the VertexRagService server but before + it is returned to user code. + """ + return response + + def pre_get_location( + self, + request: locations_pb2.GetLocationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.GetLocationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_location + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagService server. 
+ """ + return request, metadata + + def post_get_location( + self, response: locations_pb2.Location + ) -> locations_pb2.Location: + """Post-rpc interceptor for get_location + + Override in a subclass to manipulate the response + after it is returned by the VertexRagService server but before + it is returned to user code. + """ + return response + + def pre_list_locations( + self, + request: locations_pb2.ListLocationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[locations_pb2.ListLocationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_locations + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagService server. + """ + return request, metadata + + def post_list_locations( + self, response: locations_pb2.ListLocationsResponse + ) -> locations_pb2.ListLocationsResponse: + """Post-rpc interceptor for list_locations + + Override in a subclass to manipulate the response + after it is returned by the VertexRagService server but before + it is returned to user code. + """ + return response + + def pre_get_iam_policy( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagService server. + """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the VertexRagService server but before + it is returned to user code. 
+ """ + return response + + def pre_set_iam_policy( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagService server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the VertexRagService server but before + it is returned to user code. + """ + return response + + def pre_test_iam_permissions( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagService server. + """ + return request, metadata + + def post_test_iam_permissions( + self, response: iam_policy_pb2.TestIamPermissionsResponse + ) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the VertexRagService server but before + it is returned to user code. + """ + return response + + def pre_cancel_operation( + self, + request: operations_pb2.CancelOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagService server. 
+ """ + return request, metadata + + def post_cancel_operation(self, response: None) -> None: + """Post-rpc interceptor for cancel_operation + + Override in a subclass to manipulate the response + after it is returned by the VertexRagService server but before + it is returned to user code. + """ + return response + + def pre_delete_operation( + self, + request: operations_pb2.DeleteOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagService server. + """ + return request, metadata + + def post_delete_operation(self, response: None) -> None: + """Post-rpc interceptor for delete_operation + + Override in a subclass to manipulate the response + after it is returned by the VertexRagService server but before + it is returned to user code. + """ + return response + + def pre_get_operation( + self, + request: operations_pb2.GetOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagService server. + """ + return request, metadata + + def post_get_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for get_operation + + Override in a subclass to manipulate the response + after it is returned by the VertexRagService server but before + it is returned to user code. 
+ """ + return response + + def pre_list_operations( + self, + request: operations_pb2.ListOperationsRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_operations + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagService server. + """ + return request, metadata + + def post_list_operations( + self, response: operations_pb2.ListOperationsResponse + ) -> operations_pb2.ListOperationsResponse: + """Post-rpc interceptor for list_operations + + Override in a subclass to manipulate the response + after it is returned by the VertexRagService server but before + it is returned to user code. + """ + return response + + def pre_wait_operation( + self, + request: operations_pb2.WaitOperationRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[operations_pb2.WaitOperationRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for wait_operation + + Override in a subclass to manipulate the request or metadata + before they are sent to the VertexRagService server. + """ + return request, metadata + + def post_wait_operation( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for wait_operation + + Override in a subclass to manipulate the response + after it is returned by the VertexRagService server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class VertexRagServiceRestStub: + _session: AuthorizedSession + _host: str + _interceptor: VertexRagServiceRestInterceptor + + +class VertexRagServiceRestTransport(VertexRagServiceTransport): + """REST backend transport for VertexRagService. + + A service for retrieving relevant contexts. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via an issue in this + library's source repository. Thank you! + """ + + def __init__( + self, + *, + host: str = "aiplatform.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + url_scheme: str = "https", + interceptor: Optional[VertexRagServiceRestInterceptor] = None, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + NOTE: This REST transport functionality is currently in a beta + state (preview). We welcome your feedback via a GitHub issue in + this library's repository. Thank you! + + Args: + host (Optional[str]): + The hostname to connect to (default: 'aiplatform.googleapis.com'). + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you are developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+            url_scheme: the protocol scheme for the API endpoint. Normally
+                    "https", but for testing or local servers,
+                    "http" can be specified.
+        """
+        # Run the base constructor
+        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(
+                f"Unexpected hostname structure: {host}"
+            )  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience,
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST
+        )
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or VertexRagServiceRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    class _RetrieveContexts(VertexRagServiceRestStub):
+        def __hash__(self):
+            return hash("RetrieveContexts")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {}
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {
+                k: v
+                for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
+                if k not in message_dict
+            }
+
+        def __call__(
+            self,
request: vertex_rag_service.RetrieveContextsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> vertex_rag_service.RetrieveContextsResponse: + r"""Call the retrieve contexts method over HTTP. + + Args: + request (~.vertex_rag_service.RetrieveContextsRequest): + The request object. Request message for + [VertexRagService.RetrieveContexts][google.cloud.aiplatform.v1beta1.VertexRagService.RetrieveContexts]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.vertex_rag_service.RetrieveContextsResponse: + Response message for + [VertexRagService.RetrieveContexts][google.cloud.aiplatform.v1beta1.VertexRagService.RetrieveContexts]. + + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{parent=projects/*/locations/*}:retrieveContexts", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_retrieve_contexts( + request, metadata + ) + pb_request = vertex_rag_service.RetrieveContextsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], use_integers_for_enums=False + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=False, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + 
timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = vertex_rag_service.RetrieveContextsResponse() + pb_resp = vertex_rag_service.RetrieveContextsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_retrieve_contexts(resp) + return resp + + @property + def retrieve_contexts( + self, + ) -> Callable[ + [vertex_rag_service.RetrieveContextsRequest], + vertex_rag_service.RetrieveContextsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._RetrieveContexts(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_location(self): + return self._GetLocation(self._session, self._host, self._interceptor) # type: ignore + + class _GetLocation(VertexRagServiceRestStub): + def __call__( + self, + request: locations_pb2.GetLocationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.Location: + + r"""Call the get location method over HTTP. + + Args: + request (locations_pb2.GetLocationRequest): + The request object for GetLocation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.Location: Response from GetLocation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_location(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.Location() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_location(resp) + return resp + + @property + def list_locations(self): + return self._ListLocations(self._session, self._host, self._interceptor) # type: ignore + + class _ListLocations(VertexRagServiceRestStub): + def __call__( + self, + request: locations_pb2.ListLocationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> locations_pb2.ListLocationsResponse: + + r"""Call the list locations method over HTTP. + + Args: + request (locations_pb2.ListLocationsRequest): + The request object for ListLocations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + locations_pb2.ListLocationsResponse: Response from ListLocations method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*}/locations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*}/locations", + }, + ] + + request, metadata = self._interceptor.pre_list_locations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = locations_pb2.ListLocationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_locations(resp) + return resp + + @property + def get_iam_policy(self): + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _GetIamPolicy(VertexRagServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.GetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the get iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.GetIamPolicyRequest): + The request object for GetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from GetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:getIamPolicy", + }, + { + 
"method": "post", + "uri": "/ui/{resource=projects/*/locations/*/publishers/*/models/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:getIamPolicy", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:getIamPolicy", + }, + ] + + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + @property + def set_iam_policy(self): + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + class _SetIamPolicy(VertexRagServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.SetIamPolicyRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + + r"""Call the set iam policy method over HTTP. 
+ + Args: + request (iam_policy_pb2.SetIamPolicyRequest): + The request object for SetIamPolicy method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + policy_pb2.Policy: Response from SetIamPolicy method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": 
"/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:setIamPolicy", + "body": "*", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:setIamPolicy", + "body": "*", + }, + ] + + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = policy_pb2.Policy() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + @property + def test_iam_permissions(self): + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + class _TestIamPermissions(VertexRagServiceRestStub): + def __call__( + self, + request: iam_policy_pb2.TestIamPermissionsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + + r"""Call the test iam permissions method over HTTP. + + Args: + request (iam_policy_pb2.TestIamPermissionsRequest): + The request object for TestIamPermissions method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + iam_policy_pb2.TestIamPermissionsResponse: Response from TestIamPermissions method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + "body": "*", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/v1beta1/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featurestores/*/entityTypes/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/models/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/endpoints/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/notebookRuntimeTemplates/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*}:testIamPermissions", + }, + { + "method": "post", + "uri": "/ui/{resource=projects/*/locations/*/featureOnlineStores/*/featureViews/*}:testIamPermissions", + }, + ] + + request, metadata = self._interceptor.pre_test_iam_permissions( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, 
**request_kwargs) + + body = json.dumps(transcoded_request["body"]) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = iam_policy_pb2.TestIamPermissionsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + @property + def cancel_operation(self): + return self._CancelOperation(self._session, self._host, self._interceptor) # type: ignore + + class _CancelOperation(VertexRagServiceRestStub): + def __call__( + self, + request: operations_pb2.CancelOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the cancel operation method over HTTP. + + Args: + request (operations_pb2.CancelOperationRequest): + The request object for CancelOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:cancel", + }, 
+ { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:cancel", + }, + { + "method": "post", + 
"uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:cancel", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:cancel", + }, + { 
+ "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:cancel", + }, + ] + + request, metadata = self._interceptor.pre_cancel_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_cancel_operation(None) + + @property + def delete_operation(self): + return self._DeleteOperation(self._session, self._host, self._interceptor) # type: ignore + + class _DeleteOperation(VertexRagServiceRestStub): + def __call__( + self, + request: operations_pb2.DeleteOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + + r"""Call the delete operation method over HTTP. + + Args: + request (operations_pb2.DeleteOperationRequest): + The request object for DeleteOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "delete", + "uri": 
"/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + 
}, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "delete", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_delete_operation( + request, metadata + ) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + return self._interceptor.post_delete_operation(None) + + @property + def get_operation(self): + return self._GetOperation(self._session, self._host, self._interceptor) # type: ignore + + class _GetOperation(VertexRagServiceRestStub): + def __call__( + self, + request: operations_pb2.GetOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the get operation method over HTTP. + + Args: + request (operations_pb2.GetOperationRequest): + The request object for GetOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from GetOperation method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDeploymentJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { 
+ "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}", + }, + ] + + request, metadata = self._interceptor.pre_get_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_get_operation(resp) + return resp + + @property + def list_operations(self): + return self._ListOperations(self._session, self._host, self._interceptor) # type: ignore + + class _ListOperations(VertexRagServiceRestStub): + def __call__( + self, + request: operations_pb2.ListOperationsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.ListOperationsResponse: + + r"""Call the list operations method over HTTP. + + Args: + request (operations_pb2.ListOperationsRequest): + The request object for ListOperations method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.ListOperationsResponse: Response from ListOperations method. 
+ """ + + http_options: List[Dict[str, str]] = [ + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": 
"/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*}/operations", + }, + { + "method": "get", + "uri": 
"/v1beta1/{name=projects/*/locations/*/exampleStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*}/operations", + }, + { + 
"method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/solvers/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*}/operations", + }, + { + "method": 
"get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*}/operations", + }, + { + "method": "get", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", + }, + ] + + request, metadata = self._interceptor.pre_list_operations(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.ListOperationsResponse() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_list_operations(resp) + return resp + + @property + def wait_operation(self): + return self._WaitOperation(self._session, self._host, self._interceptor) # type: ignore + + class _WaitOperation(VertexRagServiceRestStub): + def __call__( + self, + request: operations_pb2.WaitOperationRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + + r"""Call the wait operation method over HTTP. 
+ + Args: + request (operations_pb2.WaitOperationRequest): + The request object for WaitOperation method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + operations_pb2.Operation: Response from WaitOperation method. + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/ui/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/datasets/*/savedQueries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/datasets/*/dataItems/*/annotations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/deploymentResourcePools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/edgeDevices/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/endpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/evaluationTasks/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/exampleStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensionControllers/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/extensions/*/deployments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featurestores/*/entityTypes/*/features/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/customJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": 
"/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexes/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/indexEndpoints/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/artifacts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/contexts/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/metadataStores/*/executions/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelDeploymentMonitoringJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/modelMonitors/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/migratableResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/reasoningEngines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/operations/*}:wait", + }, + { + "method": 
"post", + "uri": "/v1beta1/{name=projects/*/locations/*/studies/*/trials/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/schedules/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/specialistPools/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/tensorboards/*/experiments/*/runs/*/timeSeries/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureOnlineStores/*/featureViews/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/operations/*}:wait", + }, + { + "method": "post", + "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*/operations/*}:wait", + }, + ] + + request, metadata = self._interceptor.pre_wait_operation(request, metadata) + request_kwargs = json_format.MessageToDict(request) + transcoded_request = path_template.transcode(http_options, **request_kwargs) + + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads(json.dumps(transcoded_request["query_params"])) + + # Send the request + 
headers = dict(metadata) + headers["Content-Type"] = "application/json" + + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + resp = operations_pb2.Operation() + resp = json_format.Parse(response.content.decode("utf-8"), resp) + resp = self._interceptor.post_wait_operation(resp) + return resp + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__ = ("VertexRagServiceRestTransport",) diff --git a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py index 9a4279ecf7..d3d280097d 100644 --- a/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py +++ b/google/cloud/aiplatform_v1beta1/services/vizier_service/transports/rest.py @@ -962,6 +962,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -1292,6 +1296,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "delete", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -1670,6 +1678,10 @@ def 
operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -2056,6 +2068,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -2300,10 +2316,6 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ], "google.longrunning.Operations.WaitOperation": [ { @@ -2434,6 +2446,10 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", @@ -4829,6 +4845,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}:cancel", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:cancel", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}:cancel", @@ -5216,6 +5236,10 @@ def __call__( "method": "delete", "uri": 
"/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "delete", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "delete", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -5654,6 +5678,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/models/*/evaluations/*/operations/*}", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/studies/*/operations/*}", @@ -6101,6 +6129,10 @@ def __call__( "method": "get", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*}/operations", }, + { + "method": "get", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*}/operations", + }, { "method": "get", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*}/operations", @@ -6345,10 +6377,6 @@ def __call__( "method": "get", "uri": "/v1beta1/{name=projects/*/locations/*/featureGroups/*/features/*}/operations", }, - { - "method": "get", - "uri": "/ui/{name=projects/*/locations/*/tuningJobs/*}/operations", - }, ] request, metadata = self._interceptor.pre_list_operations(request, metadata) @@ -6540,6 +6568,10 @@ def __call__( "method": "post", "uri": "/ui/{name=projects/*/locations/*/trainingPipelines/*/operations/*}:wait", }, + { + "method": "post", + "uri": "/ui/{name=projects/*/locations/*/persistentResources/*/operations/*}:wait", + }, { "method": "post", "uri": "/ui/{name=projects/*/locations/*/pipelineJobs/*/operations/*}:wait", diff --git a/google/cloud/aiplatform_v1beta1/types/__init__.py b/google/cloud/aiplatform_v1beta1/types/__init__.py index 62b539fee9..230f2b0478 100644 --- a/google/cloud/aiplatform_v1beta1/types/__init__.py +++ b/google/cloud/aiplatform_v1beta1/types/__init__.py @@ -169,6 +169,102 @@ EvaluatedAnnotation, EvaluatedAnnotationExplanation, ) +from .evaluation_service import ( + BleuInput, + 
BleuInstance, + BleuMetricValue, + BleuResults, + BleuSpec, + CoherenceInput, + CoherenceInstance, + CoherenceResult, + CoherenceSpec, + EvaluateInstancesRequest, + EvaluateInstancesResponse, + ExactMatchInput, + ExactMatchInstance, + ExactMatchMetricValue, + ExactMatchResults, + ExactMatchSpec, + FluencyInput, + FluencyInstance, + FluencyResult, + FluencySpec, + FulfillmentInput, + FulfillmentInstance, + FulfillmentResult, + FulfillmentSpec, + GroundednessInput, + GroundednessInstance, + GroundednessResult, + GroundednessSpec, + PairwiseQuestionAnsweringQualityInput, + PairwiseQuestionAnsweringQualityInstance, + PairwiseQuestionAnsweringQualityResult, + PairwiseQuestionAnsweringQualitySpec, + PairwiseSummarizationQualityInput, + PairwiseSummarizationQualityInstance, + PairwiseSummarizationQualityResult, + PairwiseSummarizationQualitySpec, + QuestionAnsweringCorrectnessInput, + QuestionAnsweringCorrectnessInstance, + QuestionAnsweringCorrectnessResult, + QuestionAnsweringCorrectnessSpec, + QuestionAnsweringHelpfulnessInput, + QuestionAnsweringHelpfulnessInstance, + QuestionAnsweringHelpfulnessResult, + QuestionAnsweringHelpfulnessSpec, + QuestionAnsweringQualityInput, + QuestionAnsweringQualityInstance, + QuestionAnsweringQualityResult, + QuestionAnsweringQualitySpec, + QuestionAnsweringRelevanceInput, + QuestionAnsweringRelevanceInstance, + QuestionAnsweringRelevanceResult, + QuestionAnsweringRelevanceSpec, + RougeInput, + RougeInstance, + RougeMetricValue, + RougeResults, + RougeSpec, + SafetyInput, + SafetyInstance, + SafetyResult, + SafetySpec, + SummarizationHelpfulnessInput, + SummarizationHelpfulnessInstance, + SummarizationHelpfulnessResult, + SummarizationHelpfulnessSpec, + SummarizationQualityInput, + SummarizationQualityInstance, + SummarizationQualityResult, + SummarizationQualitySpec, + SummarizationVerbosityInput, + SummarizationVerbosityInstance, + SummarizationVerbosityResult, + SummarizationVerbositySpec, + ToolCallValidInput, + 
ToolCallValidInstance, + ToolCallValidMetricValue, + ToolCallValidResults, + ToolCallValidSpec, + ToolNameMatchInput, + ToolNameMatchInstance, + ToolNameMatchMetricValue, + ToolNameMatchResults, + ToolNameMatchSpec, + ToolParameterKeyMatchInput, + ToolParameterKeyMatchInstance, + ToolParameterKeyMatchMetricValue, + ToolParameterKeyMatchResults, + ToolParameterKeyMatchSpec, + ToolParameterKVMatchInput, + ToolParameterKVMatchInstance, + ToolParameterKVMatchMetricValue, + ToolParameterKVMatchResults, + ToolParameterKVMatchSpec, + PairwiseChoice, +) from .event import ( Event, ) @@ -198,6 +294,31 @@ from .explanation_metadata import ( ExplanationMetadata, ) +from .extension import ( + AuthConfig, + Extension, + ExtensionManifest, + ExtensionOperation, + ExtensionPrivateServiceConnectConfig, + RuntimeConfig, + AuthType, + HttpElementLocation, +) +from .extension_execution_service import ( + ExecuteExtensionRequest, + ExecuteExtensionResponse, + QueryExtensionRequest, + QueryExtensionResponse, +) +from .extension_registry_service import ( + DeleteExtensionRequest, + GetExtensionRequest, + ImportExtensionOperationMetadata, + ImportExtensionRequest, + ListExtensionsRequest, + ListExtensionsResponse, + UpdateExtensionRequest, +) from .feature import ( Feature, ) @@ -381,8 +502,10 @@ ContainerRegistryDestination, CsvDestination, CsvSource, + DirectUploadSource, GcsDestination, GcsSource, + GoogleDriveSource, TFRecordDestination, ) from .job_service import ( @@ -452,6 +575,7 @@ NfsMount, PersistentDiskSpec, ResourcesConsumed, + ShieldedVmConfig, ) from .manual_batch_tuning_parameters import ( ManualBatchTuningParameters, @@ -610,6 +734,43 @@ NasTrial, NasTrialDetail, ) +from .network_spec import ( + NetworkSpec, +) +from .notebook_euc_config import ( + NotebookEucConfig, +) +from .notebook_idle_shutdown_config import ( + NotebookIdleShutdownConfig, +) +from .notebook_runtime import ( + NotebookRuntime, + NotebookRuntimeTemplate, + NotebookRuntimeType, +) +from 
.notebook_runtime_template_ref import ( + NotebookRuntimeTemplateRef, +) +from .notebook_service import ( + AssignNotebookRuntimeOperationMetadata, + AssignNotebookRuntimeRequest, + CreateNotebookRuntimeTemplateOperationMetadata, + CreateNotebookRuntimeTemplateRequest, + DeleteNotebookRuntimeRequest, + DeleteNotebookRuntimeTemplateRequest, + GetNotebookRuntimeRequest, + GetNotebookRuntimeTemplateRequest, + ListNotebookRuntimesRequest, + ListNotebookRuntimesResponse, + ListNotebookRuntimeTemplatesRequest, + ListNotebookRuntimeTemplatesResponse, + StartNotebookRuntimeOperationMetadata, + StartNotebookRuntimeRequest, + StartNotebookRuntimeResponse, + UpgradeNotebookRuntimeOperationMetadata, + UpgradeNotebookRuntimeRequest, + UpgradeNotebookRuntimeResponse, +) from .openapi import ( Schema, Type, @@ -634,6 +795,8 @@ GetPersistentResourceRequest, ListPersistentResourcesRequest, ListPersistentResourcesResponse, + RebootPersistentResourceOperationMetadata, + RebootPersistentResourceRequest, UpdatePersistentResourceOperationMetadata, UpdatePersistentResourceRequest, ) @@ -670,6 +833,7 @@ PipelineState, ) from .prediction_service import ( + ChatCompletionsRequest, CountTokensRequest, CountTokensResponse, DirectPredictRequest, @@ -829,7 +993,9 @@ Retrieval, Tool, ToolConfig, + ToolUseExample, VertexAISearch, + VertexRagStore, ) from .training_pipeline import ( FilterSplit, @@ -856,6 +1022,36 @@ from .value import ( Value, ) +from .vertex_rag_data import ( + ImportRagFilesConfig, + RagCorpus, + RagFile, + RagFileChunkingConfig, + UploadRagFileConfig, +) +from .vertex_rag_data_service import ( + CreateRagCorpusOperationMetadata, + CreateRagCorpusRequest, + DeleteRagCorpusRequest, + DeleteRagFileRequest, + GetRagCorpusRequest, + GetRagFileRequest, + ImportRagFilesOperationMetadata, + ImportRagFilesRequest, + ImportRagFilesResponse, + ListRagCorporaRequest, + ListRagCorporaResponse, + ListRagFilesRequest, + ListRagFilesResponse, + UploadRagFileRequest, + UploadRagFileResponse, 
+) +from .vertex_rag_service import ( + RagContexts, + RagQuery, + RetrieveContextsRequest, + RetrieveContextsResponse, +) from .vizier_service import ( AddTrialMeasurementRequest, CheckTrialEarlyStoppingStateMetatdata, @@ -990,6 +1186,100 @@ "ErrorAnalysisAnnotation", "EvaluatedAnnotation", "EvaluatedAnnotationExplanation", + "BleuInput", + "BleuInstance", + "BleuMetricValue", + "BleuResults", + "BleuSpec", + "CoherenceInput", + "CoherenceInstance", + "CoherenceResult", + "CoherenceSpec", + "EvaluateInstancesRequest", + "EvaluateInstancesResponse", + "ExactMatchInput", + "ExactMatchInstance", + "ExactMatchMetricValue", + "ExactMatchResults", + "ExactMatchSpec", + "FluencyInput", + "FluencyInstance", + "FluencyResult", + "FluencySpec", + "FulfillmentInput", + "FulfillmentInstance", + "FulfillmentResult", + "FulfillmentSpec", + "GroundednessInput", + "GroundednessInstance", + "GroundednessResult", + "GroundednessSpec", + "PairwiseQuestionAnsweringQualityInput", + "PairwiseQuestionAnsweringQualityInstance", + "PairwiseQuestionAnsweringQualityResult", + "PairwiseQuestionAnsweringQualitySpec", + "PairwiseSummarizationQualityInput", + "PairwiseSummarizationQualityInstance", + "PairwiseSummarizationQualityResult", + "PairwiseSummarizationQualitySpec", + "QuestionAnsweringCorrectnessInput", + "QuestionAnsweringCorrectnessInstance", + "QuestionAnsweringCorrectnessResult", + "QuestionAnsweringCorrectnessSpec", + "QuestionAnsweringHelpfulnessInput", + "QuestionAnsweringHelpfulnessInstance", + "QuestionAnsweringHelpfulnessResult", + "QuestionAnsweringHelpfulnessSpec", + "QuestionAnsweringQualityInput", + "QuestionAnsweringQualityInstance", + "QuestionAnsweringQualityResult", + "QuestionAnsweringQualitySpec", + "QuestionAnsweringRelevanceInput", + "QuestionAnsweringRelevanceInstance", + "QuestionAnsweringRelevanceResult", + "QuestionAnsweringRelevanceSpec", + "RougeInput", + "RougeInstance", + "RougeMetricValue", + "RougeResults", + "RougeSpec", + "SafetyInput", + 
"SafetyInstance", + "SafetyResult", + "SafetySpec", + "SummarizationHelpfulnessInput", + "SummarizationHelpfulnessInstance", + "SummarizationHelpfulnessResult", + "SummarizationHelpfulnessSpec", + "SummarizationQualityInput", + "SummarizationQualityInstance", + "SummarizationQualityResult", + "SummarizationQualitySpec", + "SummarizationVerbosityInput", + "SummarizationVerbosityInstance", + "SummarizationVerbosityResult", + "SummarizationVerbositySpec", + "ToolCallValidInput", + "ToolCallValidInstance", + "ToolCallValidMetricValue", + "ToolCallValidResults", + "ToolCallValidSpec", + "ToolNameMatchInput", + "ToolNameMatchInstance", + "ToolNameMatchMetricValue", + "ToolNameMatchResults", + "ToolNameMatchSpec", + "ToolParameterKeyMatchInput", + "ToolParameterKeyMatchInstance", + "ToolParameterKeyMatchMetricValue", + "ToolParameterKeyMatchResults", + "ToolParameterKeyMatchSpec", + "ToolParameterKVMatchInput", + "ToolParameterKVMatchInstance", + "ToolParameterKVMatchMetricValue", + "ToolParameterKVMatchResults", + "ToolParameterKVMatchSpec", + "PairwiseChoice", "Event", "Execution", "Attribution", @@ -1011,6 +1301,25 @@ "SmoothGradConfig", "XraiAttribution", "ExplanationMetadata", + "AuthConfig", + "Extension", + "ExtensionManifest", + "ExtensionOperation", + "ExtensionPrivateServiceConnectConfig", + "RuntimeConfig", + "AuthType", + "HttpElementLocation", + "ExecuteExtensionRequest", + "ExecuteExtensionResponse", + "QueryExtensionRequest", + "QueryExtensionResponse", + "DeleteExtensionRequest", + "GetExtensionRequest", + "ImportExtensionOperationMetadata", + "ImportExtensionRequest", + "ListExtensionsRequest", + "ListExtensionsResponse", + "UpdateExtensionRequest", "Feature", "FeatureGroup", "FeatureStatsAnomaly", @@ -1155,8 +1464,10 @@ "ContainerRegistryDestination", "CsvDestination", "CsvSource", + "DirectUploadSource", "GcsDestination", "GcsSource", + "GoogleDriveSource", "TFRecordDestination", "CancelBatchPredictionJobRequest", "CancelCustomJobRequest", @@ -1216,6 
+1527,7 @@ "NfsMount", "PersistentDiskSpec", "ResourcesConsumed", + "ShieldedVmConfig", "ManualBatchTuningParameters", "FindNeighborsRequest", "FindNeighborsResponse", @@ -1343,6 +1655,31 @@ "NasJobSpec", "NasTrial", "NasTrialDetail", + "NetworkSpec", + "NotebookEucConfig", + "NotebookIdleShutdownConfig", + "NotebookRuntime", + "NotebookRuntimeTemplate", + "NotebookRuntimeType", + "NotebookRuntimeTemplateRef", + "AssignNotebookRuntimeOperationMetadata", + "AssignNotebookRuntimeRequest", + "CreateNotebookRuntimeTemplateOperationMetadata", + "CreateNotebookRuntimeTemplateRequest", + "DeleteNotebookRuntimeRequest", + "DeleteNotebookRuntimeTemplateRequest", + "GetNotebookRuntimeRequest", + "GetNotebookRuntimeTemplateRequest", + "ListNotebookRuntimesRequest", + "ListNotebookRuntimesResponse", + "ListNotebookRuntimeTemplatesRequest", + "ListNotebookRuntimeTemplatesResponse", + "StartNotebookRuntimeOperationMetadata", + "StartNotebookRuntimeRequest", + "StartNotebookRuntimeResponse", + "UpgradeNotebookRuntimeOperationMetadata", + "UpgradeNotebookRuntimeRequest", + "UpgradeNotebookRuntimeResponse", "Schema", "Type", "DeleteOperationMetadata", @@ -1360,6 +1697,8 @@ "GetPersistentResourceRequest", "ListPersistentResourcesRequest", "ListPersistentResourcesResponse", + "RebootPersistentResourceOperationMetadata", + "RebootPersistentResourceRequest", "UpdatePersistentResourceOperationMetadata", "UpdatePersistentResourceRequest", "PipelineFailurePolicy", @@ -1386,6 +1725,7 @@ "ListTrainingPipelinesRequest", "ListTrainingPipelinesResponse", "PipelineState", + "ChatCompletionsRequest", "CountTokensRequest", "CountTokensResponse", "DirectPredictRequest", @@ -1509,7 +1849,9 @@ "Retrieval", "Tool", "ToolConfig", + "ToolUseExample", "VertexAISearch", + "VertexRagStore", "FilterSplit", "FractionSplit", "InputDataConfig", @@ -1525,6 +1867,30 @@ "UnmanagedContainerModel", "UserActionReference", "Value", + "ImportRagFilesConfig", + "RagCorpus", + "RagFile", + "RagFileChunkingConfig", + 
"UploadRagFileConfig", + "CreateRagCorpusOperationMetadata", + "CreateRagCorpusRequest", + "DeleteRagCorpusRequest", + "DeleteRagFileRequest", + "GetRagCorpusRequest", + "GetRagFileRequest", + "ImportRagFilesOperationMetadata", + "ImportRagFilesRequest", + "ImportRagFilesResponse", + "ListRagCorporaRequest", + "ListRagCorporaResponse", + "ListRagFilesRequest", + "ListRagFilesResponse", + "UploadRagFileRequest", + "UploadRagFileResponse", + "RagContexts", + "RagQuery", + "RetrieveContextsRequest", + "RetrieveContextsResponse", "AddTrialMeasurementRequest", "CheckTrialEarlyStoppingStateMetatdata", "CheckTrialEarlyStoppingStateRequest", diff --git a/google/cloud/aiplatform_v1beta1/types/content.py b/google/cloud/aiplatform_v1beta1/types/content.py index 00005049c0..a4b7b78bd8 100644 --- a/google/cloud/aiplatform_v1beta1/types/content.py +++ b/google/cloud/aiplatform_v1beta1/types/content.py @@ -192,16 +192,18 @@ class Part(proto.Message): class Blob(proto.Message): - r"""Raw media bytes. + r"""Content blob. - Text should not be sent as raw bytes, use the 'text' field. + It's preferred to send as + [text][google.cloud.aiplatform.v1beta1.Part.text] directly rather + than raw bytes. Attributes: mime_type (str): Required. The IANA standard MIME type of the source data. data (bytes): - Required. Raw bytes for media formats. + Required. Raw bytes. """ mime_type: str = proto.Field( @@ -289,6 +291,23 @@ class GenerationConfig(proto.Message): This field is a member of `oneof`_ ``_max_output_tokens``. stop_sequences (MutableSequence[str]): Optional. Stop sequences. + presence_penalty (float): + Optional. Positive penalties. + + This field is a member of `oneof`_ ``_presence_penalty``. + frequency_penalty (float): + Optional. Frequency penalties. + + This field is a member of `oneof`_ ``_frequency_penalty``. + response_mime_type (str): + Optional. Output response mimetype of the generated + candidate text. Supported mimetype: + + - ``text/plain``: (default) Text output. 
+ - ``application/json``: JSON response in the candidates. + The model needs to be prompted to output the appropriate + response type, otherwise the behavior is undefined. This + is a preview feature. """ temperature: float = proto.Field( @@ -320,6 +339,20 @@ class GenerationConfig(proto.Message): proto.STRING, number=6, ) + presence_penalty: float = proto.Field( + proto.FLOAT, + number=8, + optional=True, + ) + frequency_penalty: float = proto.Field( + proto.FLOAT, + number=9, + optional=True, + ) + response_mime_type: str = proto.Field( + proto.STRING, + number=13, + ) class SafetySetting(proto.Message): @@ -330,6 +363,11 @@ class SafetySetting(proto.Message): Required. Harm category. threshold (google.cloud.aiplatform_v1beta1.types.SafetySetting.HarmBlockThreshold): Required. The harm block threshold. + method (google.cloud.aiplatform_v1beta1.types.SafetySetting.HarmBlockMethod): + Optional. Specify if the threshold is used + for probability or severity score. If not + specified, the threshold is used for probability + score. """ class HarmBlockThreshold(proto.Enum): @@ -354,6 +392,23 @@ class HarmBlockThreshold(proto.Enum): BLOCK_ONLY_HIGH = 3 BLOCK_NONE = 4 + class HarmBlockMethod(proto.Enum): + r"""Probability vs severity. + + Values: + HARM_BLOCK_METHOD_UNSPECIFIED (0): + The harm block method is unspecified. + SEVERITY (1): + The harm block method uses both probability + and severity scores. + PROBABILITY (2): + The harm block method uses the probability + score. + """ + HARM_BLOCK_METHOD_UNSPECIFIED = 0 + SEVERITY = 1 + PROBABILITY = 2 + category: "HarmCategory" = proto.Field( proto.ENUM, number=1, @@ -364,6 +419,11 @@ class HarmBlockThreshold(proto.Enum): number=2, enum=HarmBlockThreshold, ) + method: HarmBlockMethod = proto.Field( + proto.ENUM, + number=4, + enum=HarmBlockMethod, + ) class SafetyRating(proto.Message): @@ -669,12 +729,22 @@ class Segment(proto.Message): class GroundingAttribution(proto.Message): r"""Grounding attribution. 
+ This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: web (google.cloud.aiplatform_v1beta1.types.GroundingAttribution.Web): Optional. Attribution from the web. + This field is a member of `oneof`_ ``reference``. + retrieved_context (google.cloud.aiplatform_v1beta1.types.GroundingAttribution.RetrievedContext): + Optional. Attribution from context retrieved + by the retrieval tools. + This field is a member of `oneof`_ ``reference``. segment (google.cloud.aiplatform_v1beta1.types.Segment): Output only. Segment of the content this @@ -707,12 +777,38 @@ class Web(proto.Message): number=2, ) + class RetrievedContext(proto.Message): + r"""Attribution from context retrieved by the retrieval tools. + + Attributes: + uri (str): + Output only. URI reference of the + attribution. + title (str): + Output only. Title of the attribution. + """ + + uri: str = proto.Field( + proto.STRING, + number=1, + ) + title: str = proto.Field( + proto.STRING, + number=2, + ) + web: Web = proto.Field( proto.MESSAGE, number=3, oneof="reference", message=Web, ) + retrieved_context: RetrievedContext = proto.Field( + proto.MESSAGE, + number=4, + oneof="reference", + message=RetrievedContext, + ) segment: "Segment" = proto.Field( proto.MESSAGE, number=1, @@ -732,6 +828,9 @@ class GroundingMetadata(proto.Message): web_search_queries (MutableSequence[str]): Optional. Web search queries for the following-up web search. + retrieval_queries (MutableSequence[str]): + Optional. Queries executed by the retrieval + tools. grounding_attributions (MutableSequence[google.cloud.aiplatform_v1beta1.types.GroundingAttribution]): Optional. List of grounding attributions. 
""" @@ -740,6 +839,10 @@ class GroundingMetadata(proto.Message): proto.STRING, number=1, ) + retrieval_queries: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) grounding_attributions: MutableSequence[ "GroundingAttribution" ] = proto.RepeatedField( diff --git a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py index f702e88781..6f4dda8ce8 100644 --- a/google/cloud/aiplatform_v1beta1/types/endpoint_service.py +++ b/google/cloud/aiplatform_v1beta1/types/endpoint_service.py @@ -138,19 +138,19 @@ class ListEndpointsRequest(proto.Message): request. For field names both snake_case and camelCase are supported. - - ``endpoint`` supports = and !=. ``endpoint`` represents - the Endpoint ID, i.e. the last segment of the Endpoint's - [resource + - ``endpoint`` supports ``=`` and ``!=``. ``endpoint`` + represents the Endpoint ID, i.e. the last segment of the + Endpoint's [resource name][google.cloud.aiplatform.v1beta1.Endpoint.name]. - - ``display_name`` supports = and, != + - ``display_name`` supports ``=`` and ``!=``. - ``labels`` supports general map functions that is: - ``labels.key=value`` - key:value equality - - \`labels.key:\* or labels:key - key existence + - ``labels.key:*`` or ``labels:key`` - key existence - A key including a space must be quoted. ``labels."a key"``. - - ``base_model_name`` only supports = + - ``base_model_name`` only supports ``=``. Some examples: diff --git a/google/cloud/aiplatform_v1beta1/types/evaluation_service.py b/google/cloud/aiplatform_v1beta1/types/evaluation_service.py new file mode 100644 index 0000000000..fdedf72c71 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/evaluation_service.py @@ -0,0 +1,2981 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "PairwiseChoice", + "EvaluateInstancesRequest", + "EvaluateInstancesResponse", + "ExactMatchInput", + "ExactMatchInstance", + "ExactMatchSpec", + "ExactMatchResults", + "ExactMatchMetricValue", + "BleuInput", + "BleuInstance", + "BleuSpec", + "BleuResults", + "BleuMetricValue", + "RougeInput", + "RougeInstance", + "RougeSpec", + "RougeResults", + "RougeMetricValue", + "CoherenceInput", + "CoherenceInstance", + "CoherenceSpec", + "CoherenceResult", + "FluencyInput", + "FluencyInstance", + "FluencySpec", + "FluencyResult", + "SafetyInput", + "SafetyInstance", + "SafetySpec", + "SafetyResult", + "GroundednessInput", + "GroundednessInstance", + "GroundednessSpec", + "GroundednessResult", + "FulfillmentInput", + "FulfillmentInstance", + "FulfillmentSpec", + "FulfillmentResult", + "SummarizationQualityInput", + "SummarizationQualityInstance", + "SummarizationQualitySpec", + "SummarizationQualityResult", + "PairwiseSummarizationQualityInput", + "PairwiseSummarizationQualityInstance", + "PairwiseSummarizationQualitySpec", + "PairwiseSummarizationQualityResult", + "SummarizationHelpfulnessInput", + "SummarizationHelpfulnessInstance", + "SummarizationHelpfulnessSpec", + "SummarizationHelpfulnessResult", + "SummarizationVerbosityInput", + "SummarizationVerbosityInstance", + "SummarizationVerbositySpec", + "SummarizationVerbosityResult", + 
"QuestionAnsweringQualityInput", + "QuestionAnsweringQualityInstance", + "QuestionAnsweringQualitySpec", + "QuestionAnsweringQualityResult", + "PairwiseQuestionAnsweringQualityInput", + "PairwiseQuestionAnsweringQualityInstance", + "PairwiseQuestionAnsweringQualitySpec", + "PairwiseQuestionAnsweringQualityResult", + "QuestionAnsweringRelevanceInput", + "QuestionAnsweringRelevanceInstance", + "QuestionAnsweringRelevanceSpec", + "QuestionAnsweringRelevanceResult", + "QuestionAnsweringHelpfulnessInput", + "QuestionAnsweringHelpfulnessInstance", + "QuestionAnsweringHelpfulnessSpec", + "QuestionAnsweringHelpfulnessResult", + "QuestionAnsweringCorrectnessInput", + "QuestionAnsweringCorrectnessInstance", + "QuestionAnsweringCorrectnessSpec", + "QuestionAnsweringCorrectnessResult", + "ToolCallValidInput", + "ToolCallValidSpec", + "ToolCallValidInstance", + "ToolCallValidResults", + "ToolCallValidMetricValue", + "ToolNameMatchInput", + "ToolNameMatchSpec", + "ToolNameMatchInstance", + "ToolNameMatchResults", + "ToolNameMatchMetricValue", + "ToolParameterKeyMatchInput", + "ToolParameterKeyMatchSpec", + "ToolParameterKeyMatchInstance", + "ToolParameterKeyMatchResults", + "ToolParameterKeyMatchMetricValue", + "ToolParameterKVMatchInput", + "ToolParameterKVMatchSpec", + "ToolParameterKVMatchInstance", + "ToolParameterKVMatchResults", + "ToolParameterKVMatchMetricValue", + }, +) + + +class PairwiseChoice(proto.Enum): + r"""Pairwise prediction autorater preference. + + Values: + PAIRWISE_CHOICE_UNSPECIFIED (0): + Unspecified prediction choice. + BASELINE (1): + Baseline prediction wins + CANDIDATE (2): + Candidate prediction wins + TIE (3): + Winner cannot be determined + """ + PAIRWISE_CHOICE_UNSPECIFIED = 0 + BASELINE = 1 + CANDIDATE = 2 + TIE = 3 + + +class EvaluateInstancesRequest(proto.Message): + r"""Request message for EvaluationService.EvaluateInstances. + + This message has `oneof`_ fields (mutually exclusive fields). 
+ For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + exact_match_input (google.cloud.aiplatform_v1beta1.types.ExactMatchInput): + Auto metric instances. + Instances and metric spec for exact match + metric. + + This field is a member of `oneof`_ ``metric_inputs``. + bleu_input (google.cloud.aiplatform_v1beta1.types.BleuInput): + Instances and metric spec for bleu metric. + + This field is a member of `oneof`_ ``metric_inputs``. + rouge_input (google.cloud.aiplatform_v1beta1.types.RougeInput): + Instances and metric spec for rouge metric. + + This field is a member of `oneof`_ ``metric_inputs``. + fluency_input (google.cloud.aiplatform_v1beta1.types.FluencyInput): + LLM-based metric instance. + General text generation metrics, applicable to + other categories. Input for fluency metric. + + This field is a member of `oneof`_ ``metric_inputs``. + coherence_input (google.cloud.aiplatform_v1beta1.types.CoherenceInput): + Input for coherence metric. + + This field is a member of `oneof`_ ``metric_inputs``. + safety_input (google.cloud.aiplatform_v1beta1.types.SafetyInput): + Input for safety metric. + + This field is a member of `oneof`_ ``metric_inputs``. + groundedness_input (google.cloud.aiplatform_v1beta1.types.GroundednessInput): + Input for groundedness metric. + + This field is a member of `oneof`_ ``metric_inputs``. + fulfillment_input (google.cloud.aiplatform_v1beta1.types.FulfillmentInput): + Input for fulfillment metric. + + This field is a member of `oneof`_ ``metric_inputs``. + summarization_quality_input (google.cloud.aiplatform_v1beta1.types.SummarizationQualityInput): + Input for summarization quality metric. + + This field is a member of `oneof`_ ``metric_inputs``. 
+ pairwise_summarization_quality_input (google.cloud.aiplatform_v1beta1.types.PairwiseSummarizationQualityInput): + Input for pairwise summarization quality + metric. + + This field is a member of `oneof`_ ``metric_inputs``. + summarization_helpfulness_input (google.cloud.aiplatform_v1beta1.types.SummarizationHelpfulnessInput): + Input for summarization helpfulness metric. + + This field is a member of `oneof`_ ``metric_inputs``. + summarization_verbosity_input (google.cloud.aiplatform_v1beta1.types.SummarizationVerbosityInput): + Input for summarization verbosity metric. + + This field is a member of `oneof`_ ``metric_inputs``. + question_answering_quality_input (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringQualityInput): + Input for question answering quality metric. + + This field is a member of `oneof`_ ``metric_inputs``. + pairwise_question_answering_quality_input (google.cloud.aiplatform_v1beta1.types.PairwiseQuestionAnsweringQualityInput): + Input for pairwise question answering quality + metric. + + This field is a member of `oneof`_ ``metric_inputs``. + question_answering_relevance_input (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringRelevanceInput): + Input for question answering relevance + metric. + + This field is a member of `oneof`_ ``metric_inputs``. + question_answering_helpfulness_input (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringHelpfulnessInput): + Input for question answering helpfulness + metric. + + This field is a member of `oneof`_ ``metric_inputs``. + question_answering_correctness_input (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringCorrectnessInput): + Input for question answering correctness + metric. + + This field is a member of `oneof`_ ``metric_inputs``. + tool_call_valid_input (google.cloud.aiplatform_v1beta1.types.ToolCallValidInput): + Tool call metric instances. + Input for tool call valid metric. + + This field is a member of `oneof`_ ``metric_inputs``. 
+ tool_name_match_input (google.cloud.aiplatform_v1beta1.types.ToolNameMatchInput): + Input for tool name match metric. + + This field is a member of `oneof`_ ``metric_inputs``. + tool_parameter_key_match_input (google.cloud.aiplatform_v1beta1.types.ToolParameterKeyMatchInput): + Input for tool parameter key match metric. + + This field is a member of `oneof`_ ``metric_inputs``. + tool_parameter_kv_match_input (google.cloud.aiplatform_v1beta1.types.ToolParameterKVMatchInput): + Input for tool parameter key value match + metric. + + This field is a member of `oneof`_ ``metric_inputs``. + location (str): + Required. The resource name of the Location to evaluate the + instances. Format: + ``projects/{project}/locations/{location}`` + """ + + exact_match_input: "ExactMatchInput" = proto.Field( + proto.MESSAGE, + number=2, + oneof="metric_inputs", + message="ExactMatchInput", + ) + bleu_input: "BleuInput" = proto.Field( + proto.MESSAGE, + number=3, + oneof="metric_inputs", + message="BleuInput", + ) + rouge_input: "RougeInput" = proto.Field( + proto.MESSAGE, + number=4, + oneof="metric_inputs", + message="RougeInput", + ) + fluency_input: "FluencyInput" = proto.Field( + proto.MESSAGE, + number=5, + oneof="metric_inputs", + message="FluencyInput", + ) + coherence_input: "CoherenceInput" = proto.Field( + proto.MESSAGE, + number=6, + oneof="metric_inputs", + message="CoherenceInput", + ) + safety_input: "SafetyInput" = proto.Field( + proto.MESSAGE, + number=8, + oneof="metric_inputs", + message="SafetyInput", + ) + groundedness_input: "GroundednessInput" = proto.Field( + proto.MESSAGE, + number=9, + oneof="metric_inputs", + message="GroundednessInput", + ) + fulfillment_input: "FulfillmentInput" = proto.Field( + proto.MESSAGE, + number=12, + oneof="metric_inputs", + message="FulfillmentInput", + ) + summarization_quality_input: "SummarizationQualityInput" = proto.Field( + proto.MESSAGE, + number=7, + oneof="metric_inputs", + message="SummarizationQualityInput", + ) + 
pairwise_summarization_quality_input: "PairwiseSummarizationQualityInput" = ( + proto.Field( + proto.MESSAGE, + number=23, + oneof="metric_inputs", + message="PairwiseSummarizationQualityInput", + ) + ) + summarization_helpfulness_input: "SummarizationHelpfulnessInput" = proto.Field( + proto.MESSAGE, + number=14, + oneof="metric_inputs", + message="SummarizationHelpfulnessInput", + ) + summarization_verbosity_input: "SummarizationVerbosityInput" = proto.Field( + proto.MESSAGE, + number=15, + oneof="metric_inputs", + message="SummarizationVerbosityInput", + ) + question_answering_quality_input: "QuestionAnsweringQualityInput" = proto.Field( + proto.MESSAGE, + number=10, + oneof="metric_inputs", + message="QuestionAnsweringQualityInput", + ) + pairwise_question_answering_quality_input: "PairwiseQuestionAnsweringQualityInput" = proto.Field( + proto.MESSAGE, + number=24, + oneof="metric_inputs", + message="PairwiseQuestionAnsweringQualityInput", + ) + question_answering_relevance_input: "QuestionAnsweringRelevanceInput" = proto.Field( + proto.MESSAGE, + number=16, + oneof="metric_inputs", + message="QuestionAnsweringRelevanceInput", + ) + question_answering_helpfulness_input: "QuestionAnsweringHelpfulnessInput" = ( + proto.Field( + proto.MESSAGE, + number=17, + oneof="metric_inputs", + message="QuestionAnsweringHelpfulnessInput", + ) + ) + question_answering_correctness_input: "QuestionAnsweringCorrectnessInput" = ( + proto.Field( + proto.MESSAGE, + number=18, + oneof="metric_inputs", + message="QuestionAnsweringCorrectnessInput", + ) + ) + tool_call_valid_input: "ToolCallValidInput" = proto.Field( + proto.MESSAGE, + number=19, + oneof="metric_inputs", + message="ToolCallValidInput", + ) + tool_name_match_input: "ToolNameMatchInput" = proto.Field( + proto.MESSAGE, + number=20, + oneof="metric_inputs", + message="ToolNameMatchInput", + ) + tool_parameter_key_match_input: "ToolParameterKeyMatchInput" = proto.Field( + proto.MESSAGE, + number=21, + oneof="metric_inputs", + 
message="ToolParameterKeyMatchInput", + ) + tool_parameter_kv_match_input: "ToolParameterKVMatchInput" = proto.Field( + proto.MESSAGE, + number=22, + oneof="metric_inputs", + message="ToolParameterKVMatchInput", + ) + location: str = proto.Field( + proto.STRING, + number=1, + ) + + +class EvaluateInstancesResponse(proto.Message): + r"""Response message for EvaluationService.EvaluateInstances. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + exact_match_results (google.cloud.aiplatform_v1beta1.types.ExactMatchResults): + Auto metric evaluation results. + Results for exact match metric. + + This field is a member of `oneof`_ ``evaluation_results``. + bleu_results (google.cloud.aiplatform_v1beta1.types.BleuResults): + Results for bleu metric. + + This field is a member of `oneof`_ ``evaluation_results``. + rouge_results (google.cloud.aiplatform_v1beta1.types.RougeResults): + Results for rouge metric. + + This field is a member of `oneof`_ ``evaluation_results``. + fluency_result (google.cloud.aiplatform_v1beta1.types.FluencyResult): + LLM-based metric evaluation result. + General text generation metrics, applicable to + other categories. Result for fluency metric. + + This field is a member of `oneof`_ ``evaluation_results``. + coherence_result (google.cloud.aiplatform_v1beta1.types.CoherenceResult): + Result for coherence metric. + + This field is a member of `oneof`_ ``evaluation_results``. + safety_result (google.cloud.aiplatform_v1beta1.types.SafetyResult): + Result for safety metric. + + This field is a member of `oneof`_ ``evaluation_results``. + groundedness_result (google.cloud.aiplatform_v1beta1.types.GroundednessResult): + Result for groundedness metric. 
+ + This field is a member of `oneof`_ ``evaluation_results``. + fulfillment_result (google.cloud.aiplatform_v1beta1.types.FulfillmentResult): + Result for fulfillment metric. + + This field is a member of `oneof`_ ``evaluation_results``. + summarization_quality_result (google.cloud.aiplatform_v1beta1.types.SummarizationQualityResult): + Summarization only metrics. + Result for summarization quality metric. + + This field is a member of `oneof`_ ``evaluation_results``. + pairwise_summarization_quality_result (google.cloud.aiplatform_v1beta1.types.PairwiseSummarizationQualityResult): + Result for pairwise summarization quality + metric. + + This field is a member of `oneof`_ ``evaluation_results``. + summarization_helpfulness_result (google.cloud.aiplatform_v1beta1.types.SummarizationHelpfulnessResult): + Result for summarization helpfulness metric. + + This field is a member of `oneof`_ ``evaluation_results``. + summarization_verbosity_result (google.cloud.aiplatform_v1beta1.types.SummarizationVerbosityResult): + Result for summarization verbosity metric. + + This field is a member of `oneof`_ ``evaluation_results``. + question_answering_quality_result (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringQualityResult): + Question answering only metrics. + Result for question answering quality metric. + + This field is a member of `oneof`_ ``evaluation_results``. + pairwise_question_answering_quality_result (google.cloud.aiplatform_v1beta1.types.PairwiseQuestionAnsweringQualityResult): + Result for pairwise question answering + quality metric. + + This field is a member of `oneof`_ ``evaluation_results``. + question_answering_relevance_result (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringRelevanceResult): + Result for question answering relevance + metric. + + This field is a member of `oneof`_ ``evaluation_results``. 
+ question_answering_helpfulness_result (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringHelpfulnessResult): + Result for question answering helpfulness + metric. + + This field is a member of `oneof`_ ``evaluation_results``. + question_answering_correctness_result (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringCorrectnessResult): + Result for question answering correctness + metric. + + This field is a member of `oneof`_ ``evaluation_results``. + tool_call_valid_results (google.cloud.aiplatform_v1beta1.types.ToolCallValidResults): + Tool call metrics. + Results for tool call valid metric. + + This field is a member of `oneof`_ ``evaluation_results``. + tool_name_match_results (google.cloud.aiplatform_v1beta1.types.ToolNameMatchResults): + Results for tool name match metric. + + This field is a member of `oneof`_ ``evaluation_results``. + tool_parameter_key_match_results (google.cloud.aiplatform_v1beta1.types.ToolParameterKeyMatchResults): + Results for tool parameter key match metric. + + This field is a member of `oneof`_ ``evaluation_results``. + tool_parameter_kv_match_results (google.cloud.aiplatform_v1beta1.types.ToolParameterKVMatchResults): + Results for tool parameter key value match + metric. + + This field is a member of `oneof`_ ``evaluation_results``. 
+ """ + + exact_match_results: "ExactMatchResults" = proto.Field( + proto.MESSAGE, + number=1, + oneof="evaluation_results", + message="ExactMatchResults", + ) + bleu_results: "BleuResults" = proto.Field( + proto.MESSAGE, + number=2, + oneof="evaluation_results", + message="BleuResults", + ) + rouge_results: "RougeResults" = proto.Field( + proto.MESSAGE, + number=3, + oneof="evaluation_results", + message="RougeResults", + ) + fluency_result: "FluencyResult" = proto.Field( + proto.MESSAGE, + number=4, + oneof="evaluation_results", + message="FluencyResult", + ) + coherence_result: "CoherenceResult" = proto.Field( + proto.MESSAGE, + number=5, + oneof="evaluation_results", + message="CoherenceResult", + ) + safety_result: "SafetyResult" = proto.Field( + proto.MESSAGE, + number=7, + oneof="evaluation_results", + message="SafetyResult", + ) + groundedness_result: "GroundednessResult" = proto.Field( + proto.MESSAGE, + number=8, + oneof="evaluation_results", + message="GroundednessResult", + ) + fulfillment_result: "FulfillmentResult" = proto.Field( + proto.MESSAGE, + number=11, + oneof="evaluation_results", + message="FulfillmentResult", + ) + summarization_quality_result: "SummarizationQualityResult" = proto.Field( + proto.MESSAGE, + number=6, + oneof="evaluation_results", + message="SummarizationQualityResult", + ) + pairwise_summarization_quality_result: "PairwiseSummarizationQualityResult" = ( + proto.Field( + proto.MESSAGE, + number=22, + oneof="evaluation_results", + message="PairwiseSummarizationQualityResult", + ) + ) + summarization_helpfulness_result: "SummarizationHelpfulnessResult" = proto.Field( + proto.MESSAGE, + number=13, + oneof="evaluation_results", + message="SummarizationHelpfulnessResult", + ) + summarization_verbosity_result: "SummarizationVerbosityResult" = proto.Field( + proto.MESSAGE, + number=14, + oneof="evaluation_results", + message="SummarizationVerbosityResult", + ) + question_answering_quality_result: "QuestionAnsweringQualityResult" = 
proto.Field( + proto.MESSAGE, + number=9, + oneof="evaluation_results", + message="QuestionAnsweringQualityResult", + ) + pairwise_question_answering_quality_result: "PairwiseQuestionAnsweringQualityResult" = proto.Field( + proto.MESSAGE, + number=23, + oneof="evaluation_results", + message="PairwiseQuestionAnsweringQualityResult", + ) + question_answering_relevance_result: "QuestionAnsweringRelevanceResult" = ( + proto.Field( + proto.MESSAGE, + number=15, + oneof="evaluation_results", + message="QuestionAnsweringRelevanceResult", + ) + ) + question_answering_helpfulness_result: "QuestionAnsweringHelpfulnessResult" = ( + proto.Field( + proto.MESSAGE, + number=16, + oneof="evaluation_results", + message="QuestionAnsweringHelpfulnessResult", + ) + ) + question_answering_correctness_result: "QuestionAnsweringCorrectnessResult" = ( + proto.Field( + proto.MESSAGE, + number=17, + oneof="evaluation_results", + message="QuestionAnsweringCorrectnessResult", + ) + ) + tool_call_valid_results: "ToolCallValidResults" = proto.Field( + proto.MESSAGE, + number=18, + oneof="evaluation_results", + message="ToolCallValidResults", + ) + tool_name_match_results: "ToolNameMatchResults" = proto.Field( + proto.MESSAGE, + number=19, + oneof="evaluation_results", + message="ToolNameMatchResults", + ) + tool_parameter_key_match_results: "ToolParameterKeyMatchResults" = proto.Field( + proto.MESSAGE, + number=20, + oneof="evaluation_results", + message="ToolParameterKeyMatchResults", + ) + tool_parameter_kv_match_results: "ToolParameterKVMatchResults" = proto.Field( + proto.MESSAGE, + number=21, + oneof="evaluation_results", + message="ToolParameterKVMatchResults", + ) + + +class ExactMatchInput(proto.Message): + r"""Input for exact match metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.ExactMatchSpec): + Required. Spec for exact match metric. + instances (MutableSequence[google.cloud.aiplatform_v1beta1.types.ExactMatchInstance]): + Required. 
Repeated exact match instances. + """ + + metric_spec: "ExactMatchSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="ExactMatchSpec", + ) + instances: MutableSequence["ExactMatchInstance"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="ExactMatchInstance", + ) + + +class ExactMatchInstance(proto.Message): + r"""Spec for exact match instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Required. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +class ExactMatchSpec(proto.Message): + r"""Spec for exact match metric - returns 1 if prediction and + reference exactly matches, otherwise 0. + + """ + + +class ExactMatchResults(proto.Message): + r"""Results for exact match metric. + + Attributes: + exact_match_metric_values (MutableSequence[google.cloud.aiplatform_v1beta1.types.ExactMatchMetricValue]): + Output only. Exact match metric values. + """ + + exact_match_metric_values: MutableSequence[ + "ExactMatchMetricValue" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ExactMatchMetricValue", + ) + + +class ExactMatchMetricValue(proto.Message): + r"""Exact match metric value for an instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Exact match score. + + This field is a member of `oneof`_ ``_score``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + + +class BleuInput(proto.Message): + r"""Input for bleu metric. 
+ + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.BleuSpec): + Required. Spec for bleu score metric. + instances (MutableSequence[google.cloud.aiplatform_v1beta1.types.BleuInstance]): + Required. Repeated bleu instances. + """ + + metric_spec: "BleuSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="BleuSpec", + ) + instances: MutableSequence["BleuInstance"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="BleuInstance", + ) + + +class BleuInstance(proto.Message): + r"""Spec for bleu instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Required. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +class BleuSpec(proto.Message): + r"""Spec for bleu score metric - calculates the precision of + n-grams in the prediction as compared to reference - returns a + score ranging between 0 to 1. + + """ + + +class BleuResults(proto.Message): + r"""Results for bleu metric. + + Attributes: + bleu_metric_values (MutableSequence[google.cloud.aiplatform_v1beta1.types.BleuMetricValue]): + Output only. Bleu metric values. + """ + + bleu_metric_values: MutableSequence["BleuMetricValue"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="BleuMetricValue", + ) + + +class BleuMetricValue(proto.Message): + r"""Bleu metric value for an instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Bleu score. + + This field is a member of `oneof`_ ``_score``. 
+ """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + + +class RougeInput(proto.Message): + r"""Input for rouge metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.RougeSpec): + Required. Spec for rouge score metric. + instances (MutableSequence[google.cloud.aiplatform_v1beta1.types.RougeInstance]): + Required. Repeated rouge instances. + """ + + metric_spec: "RougeSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="RougeSpec", + ) + instances: MutableSequence["RougeInstance"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="RougeInstance", + ) + + +class RougeInstance(proto.Message): + r"""Spec for rouge instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Required. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +class RougeSpec(proto.Message): + r"""Spec for rouge score metric - calculates the recall of + n-grams in prediction as compared to reference - returns a score + ranging between 0 and 1. + + Attributes: + rouge_type (str): + Optional. Supported rouge types are rougen[1-9], rougeL and + rougeLsum. + use_stemmer (bool): + Optional. Whether to use stemmer to compute + rouge score. + split_summaries (bool): + Optional. Whether to split summaries while + using rougeLsum. 
+ """ + + rouge_type: str = proto.Field( + proto.STRING, + number=1, + ) + use_stemmer: bool = proto.Field( + proto.BOOL, + number=2, + ) + split_summaries: bool = proto.Field( + proto.BOOL, + number=3, + ) + + +class RougeResults(proto.Message): + r"""Results for rouge metric. + + Attributes: + rouge_metric_values (MutableSequence[google.cloud.aiplatform_v1beta1.types.RougeMetricValue]): + Output only. Rouge metric values. + """ + + rouge_metric_values: MutableSequence["RougeMetricValue"] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="RougeMetricValue", + ) + + +class RougeMetricValue(proto.Message): + r"""Rouge metric value for an instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Rouge score. + + This field is a member of `oneof`_ ``_score``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + + +class CoherenceInput(proto.Message): + r"""Input for coherence metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.CoherenceSpec): + Required. Spec for coherence score metric. + instance (google.cloud.aiplatform_v1beta1.types.CoherenceInstance): + Required. Coherence instance. + """ + + metric_spec: "CoherenceSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="CoherenceSpec", + ) + instance: "CoherenceInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="CoherenceInstance", + ) + + +class CoherenceInstance(proto.Message): + r"""Spec for coherence instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. 
+ """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + + +class CoherenceSpec(proto.Message): + r"""Spec for coherence score metric. + + Attributes: + version (int): + Optional. Which version to use for + evaluation. + """ + + version: int = proto.Field( + proto.INT32, + number=1, + ) + + +class CoherenceResult(proto.Message): + r"""Spec for coherence result. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Coherence score. + + This field is a member of `oneof`_ ``_score``. + explanation (str): + Output only. Explanation for coherence score. + confidence (float): + Output only. Confidence for coherence score. + + This field is a member of `oneof`_ ``_confidence``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class FluencyInput(proto.Message): + r"""Input for fluency metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.FluencySpec): + Required. Spec for fluency score metric. + instance (google.cloud.aiplatform_v1beta1.types.FluencyInstance): + Required. Fluency instance. + """ + + metric_spec: "FluencySpec" = proto.Field( + proto.MESSAGE, + number=1, + message="FluencySpec", + ) + instance: "FluencyInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="FluencyInstance", + ) + + +class FluencyInstance(proto.Message): + r"""Spec for fluency instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. 
+ """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + + +class FluencySpec(proto.Message): + r"""Spec for fluency score metric. + + Attributes: + version (int): + Optional. Which version to use for + evaluation. + """ + + version: int = proto.Field( + proto.INT32, + number=1, + ) + + +class FluencyResult(proto.Message): + r"""Spec for fluency result. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Fluency score. + + This field is a member of `oneof`_ ``_score``. + explanation (str): + Output only. Explanation for fluency score. + confidence (float): + Output only. Confidence for fluency score. + + This field is a member of `oneof`_ ``_confidence``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class SafetyInput(proto.Message): + r"""Input for safety metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.SafetySpec): + Required. Spec for safety metric. + instance (google.cloud.aiplatform_v1beta1.types.SafetyInstance): + Required. Safety instance. + """ + + metric_spec: "SafetySpec" = proto.Field( + proto.MESSAGE, + number=1, + message="SafetySpec", + ) + instance: "SafetyInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="SafetyInstance", + ) + + +class SafetyInstance(proto.Message): + r"""Spec for safety instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. 
+ """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + + +class SafetySpec(proto.Message): + r"""Spec for safety metric. + + Attributes: + version (int): + Optional. Which version to use for + evaluation. + """ + + version: int = proto.Field( + proto.INT32, + number=1, + ) + + +class SafetyResult(proto.Message): + r"""Spec for safety result. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Safety score. + + This field is a member of `oneof`_ ``_score``. + explanation (str): + Output only. Explanation for safety score. + confidence (float): + Output only. Confidence for safety score. + + This field is a member of `oneof`_ ``_confidence``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class GroundednessInput(proto.Message): + r"""Input for groundedness metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.GroundednessSpec): + Required. Spec for groundedness metric. + instance (google.cloud.aiplatform_v1beta1.types.GroundednessInstance): + Required. Groundedness instance. + """ + + metric_spec: "GroundednessSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="GroundednessSpec", + ) + instance: "GroundednessInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="GroundednessInstance", + ) + + +class GroundednessInstance(proto.Message): + r"""Spec for groundedness instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + context (str): + Required. 
Background information provided in + context used to compare against the prediction. + + This field is a member of `oneof`_ ``_context``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + context: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +class GroundednessSpec(proto.Message): + r"""Spec for groundedness metric. + + Attributes: + version (int): + Optional. Which version to use for + evaluation. + """ + + version: int = proto.Field( + proto.INT32, + number=1, + ) + + +class GroundednessResult(proto.Message): + r"""Spec for groundedness result. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Groundedness score. + + This field is a member of `oneof`_ ``_score``. + explanation (str): + Output only. Explanation for groundedness + score. + confidence (float): + Output only. Confidence for groundedness + score. + + This field is a member of `oneof`_ ``_confidence``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class FulfillmentInput(proto.Message): + r"""Input for fulfillment metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.FulfillmentSpec): + Required. Spec for fulfillment score metric. + instance (google.cloud.aiplatform_v1beta1.types.FulfillmentInstance): + Required. Fulfillment instance. + """ + + metric_spec: "FulfillmentSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="FulfillmentSpec", + ) + instance: "FulfillmentInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="FulfillmentInstance", + ) + + +class FulfillmentInstance(proto.Message): + r"""Spec for fulfillment instance. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + instruction (str): + Required. Inference instruction prompt to + compare prediction with. + + This field is a member of `oneof`_ ``_instruction``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + instruction: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +class FulfillmentSpec(proto.Message): + r"""Spec for fulfillment metric. + + Attributes: + version (int): + Optional. Which version to use for + evaluation. + """ + + version: int = proto.Field( + proto.INT32, + number=1, + ) + + +class FulfillmentResult(proto.Message): + r"""Spec for fulfillment result. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Fulfillment score. + + This field is a member of `oneof`_ ``_score``. + explanation (str): + Output only. Explanation for fulfillment + score. + confidence (float): + Output only. Confidence for fulfillment + score. + + This field is a member of `oneof`_ ``_confidence``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class SummarizationQualityInput(proto.Message): + r"""Input for summarization quality metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.SummarizationQualitySpec): + Required. Spec for summarization quality + score metric. + instance (google.cloud.aiplatform_v1beta1.types.SummarizationQualityInstance): + Required. Summarization quality instance. 
+ """ + + metric_spec: "SummarizationQualitySpec" = proto.Field( + proto.MESSAGE, + number=1, + message="SummarizationQualitySpec", + ) + instance: "SummarizationQualityInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="SummarizationQualityInstance", + ) + + +class SummarizationQualityInstance(proto.Message): + r"""Spec for summarization quality instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Optional. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + context (str): + Required. Text to be summarized. + + This field is a member of `oneof`_ ``_context``. + instruction (str): + Required. Summarization prompt for LLM. + + This field is a member of `oneof`_ ``_instruction``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + context: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + instruction: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + + +class SummarizationQualitySpec(proto.Message): + r"""Spec for summarization quality score metric. + + Attributes: + use_reference (bool): + Optional. Whether to use instance.reference + to compute summarization quality. + version (int): + Optional. Which version to use for + evaluation. + """ + + use_reference: bool = proto.Field( + proto.BOOL, + number=1, + ) + version: int = proto.Field( + proto.INT32, + number=2, + ) + + +class SummarizationQualityResult(proto.Message): + r"""Spec for summarization quality result. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Summarization Quality score. + + This field is a member of `oneof`_ ``_score``. + explanation (str): + Output only. Explanation for summarization + quality score. + confidence (float): + Output only. Confidence for summarization + quality score. + + This field is a member of `oneof`_ ``_confidence``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class PairwiseSummarizationQualityInput(proto.Message): + r"""Input for pairwise summarization quality metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.PairwiseSummarizationQualitySpec): + Required. Spec for pairwise summarization + quality score metric. + instance (google.cloud.aiplatform_v1beta1.types.PairwiseSummarizationQualityInstance): + Required. Pairwise summarization quality + instance. + """ + + metric_spec: "PairwiseSummarizationQualitySpec" = proto.Field( + proto.MESSAGE, + number=1, + message="PairwiseSummarizationQualitySpec", + ) + instance: "PairwiseSummarizationQualityInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="PairwiseSummarizationQualityInstance", + ) + + +class PairwiseSummarizationQualityInstance(proto.Message): + r"""Spec for pairwise summarization quality instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the candidate model. + + This field is a member of `oneof`_ ``_prediction``. + baseline_prediction (str): + Required. Output of the baseline model. + + This field is a member of `oneof`_ ``_baseline_prediction``. + reference (str): + Optional. 
Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + context (str): + Required. Text to be summarized. + + This field is a member of `oneof`_ ``_context``. + instruction (str): + Required. Summarization prompt for LLM. + + This field is a member of `oneof`_ ``_instruction``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + baseline_prediction: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + context: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + instruction: str = proto.Field( + proto.STRING, + number=5, + optional=True, + ) + + +class PairwiseSummarizationQualitySpec(proto.Message): + r"""Spec for pairwise summarization quality score metric. + + Attributes: + use_reference (bool): + Optional. Whether to use instance.reference + to compute pairwise summarization quality. + version (int): + Optional. Which version to use for + evaluation. + """ + + use_reference: bool = proto.Field( + proto.BOOL, + number=1, + ) + version: int = proto.Field( + proto.INT32, + number=2, + ) + + +class PairwiseSummarizationQualityResult(proto.Message): + r"""Spec for pairwise summarization quality result. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + pairwise_choice (google.cloud.aiplatform_v1beta1.types.PairwiseChoice): + Output only. Pairwise summarization + prediction choice. + explanation (str): + Output only. Explanation for summarization + quality score. + confidence (float): + Output only. Confidence for summarization + quality score. + + This field is a member of `oneof`_ ``_confidence``. 
+ """ + + pairwise_choice: "PairwiseChoice" = proto.Field( + proto.ENUM, + number=1, + enum="PairwiseChoice", + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class SummarizationHelpfulnessInput(proto.Message): + r"""Input for summarization helpfulness metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.SummarizationHelpfulnessSpec): + Required. Spec for summarization helpfulness + score metric. + instance (google.cloud.aiplatform_v1beta1.types.SummarizationHelpfulnessInstance): + Required. Summarization helpfulness instance. + """ + + metric_spec: "SummarizationHelpfulnessSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="SummarizationHelpfulnessSpec", + ) + instance: "SummarizationHelpfulnessInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="SummarizationHelpfulnessInstance", + ) + + +class SummarizationHelpfulnessInstance(proto.Message): + r"""Spec for summarization helpfulness instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Optional. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + context (str): + Required. Text to be summarized. + + This field is a member of `oneof`_ ``_context``. + instruction (str): + Optional. Summarization prompt for LLM. + + This field is a member of `oneof`_ ``_instruction``. 
+ """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + context: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + instruction: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + + +class SummarizationHelpfulnessSpec(proto.Message): + r"""Spec for summarization helpfulness score metric. + + Attributes: + use_reference (bool): + Optional. Whether to use instance.reference + to compute summarization helpfulness. + version (int): + Optional. Which version to use for + evaluation. + """ + + use_reference: bool = proto.Field( + proto.BOOL, + number=1, + ) + version: int = proto.Field( + proto.INT32, + number=2, + ) + + +class SummarizationHelpfulnessResult(proto.Message): + r"""Spec for summarization helpfulness result. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Summarization Helpfulness score. + + This field is a member of `oneof`_ ``_score``. + explanation (str): + Output only. Explanation for summarization + helpfulness score. + confidence (float): + Output only. Confidence for summarization + helpfulness score. + + This field is a member of `oneof`_ ``_confidence``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class SummarizationVerbosityInput(proto.Message): + r"""Input for summarization verbosity metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.SummarizationVerbositySpec): + Required. Spec for summarization verbosity + score metric. + instance (google.cloud.aiplatform_v1beta1.types.SummarizationVerbosityInstance): + Required. Summarization verbosity instance. 
+ """ + + metric_spec: "SummarizationVerbositySpec" = proto.Field( + proto.MESSAGE, + number=1, + message="SummarizationVerbositySpec", + ) + instance: "SummarizationVerbosityInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="SummarizationVerbosityInstance", + ) + + +class SummarizationVerbosityInstance(proto.Message): + r"""Spec for summarization verbosity instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Optional. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + context (str): + Required. Text to be summarized. + + This field is a member of `oneof`_ ``_context``. + instruction (str): + Optional. Summarization prompt for LLM. + + This field is a member of `oneof`_ ``_instruction``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + context: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + instruction: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + + +class SummarizationVerbositySpec(proto.Message): + r"""Spec for summarization verbosity score metric. + + Attributes: + use_reference (bool): + Optional. Whether to use instance.reference + to compute summarization verbosity. + version (int): + Optional. Which version to use for + evaluation. + """ + + use_reference: bool = proto.Field( + proto.BOOL, + number=1, + ) + version: int = proto.Field( + proto.INT32, + number=2, + ) + + +class SummarizationVerbosityResult(proto.Message): + r"""Spec for summarization verbosity result. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Summarization Verbosity score. + + This field is a member of `oneof`_ ``_score``. + explanation (str): + Output only. Explanation for summarization + verbosity score. + confidence (float): + Output only. Confidence for summarization + verbosity score. + + This field is a member of `oneof`_ ``_confidence``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class QuestionAnsweringQualityInput(proto.Message): + r"""Input for question answering quality metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringQualitySpec): + Required. Spec for question answering quality + score metric. + instance (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringQualityInstance): + Required. Question answering quality + instance. + """ + + metric_spec: "QuestionAnsweringQualitySpec" = proto.Field( + proto.MESSAGE, + number=1, + message="QuestionAnsweringQualitySpec", + ) + instance: "QuestionAnsweringQualityInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="QuestionAnsweringQualityInstance", + ) + + +class QuestionAnsweringQualityInstance(proto.Message): + r"""Spec for question answering quality instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Optional. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + context (str): + Required. Text to answer the question. + + This field is a member of `oneof`_ ``_context``. 
+ instruction (str): + Required. Question Answering prompt for LLM. + + This field is a member of `oneof`_ ``_instruction``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + context: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + instruction: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + + +class QuestionAnsweringQualitySpec(proto.Message): + r"""Spec for question answering quality score metric. + + Attributes: + use_reference (bool): + Optional. Whether to use instance.reference + to compute question answering quality. + version (int): + Optional. Which version to use for + evaluation. + """ + + use_reference: bool = proto.Field( + proto.BOOL, + number=1, + ) + version: int = proto.Field( + proto.INT32, + number=2, + ) + + +class QuestionAnsweringQualityResult(proto.Message): + r"""Spec for question answering quality result. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Question Answering Quality + score. + + This field is a member of `oneof`_ ``_score``. + explanation (str): + Output only. Explanation for question + answering quality score. + confidence (float): + Output only. Confidence for question + answering quality score. + + This field is a member of `oneof`_ ``_confidence``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class PairwiseQuestionAnsweringQualityInput(proto.Message): + r"""Input for pairwise question answering quality metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.PairwiseQuestionAnsweringQualitySpec): + Required. 
Spec for pairwise question + answering quality score metric. + instance (google.cloud.aiplatform_v1beta1.types.PairwiseQuestionAnsweringQualityInstance): + Required. Pairwise question answering quality + instance. + """ + + metric_spec: "PairwiseQuestionAnsweringQualitySpec" = proto.Field( + proto.MESSAGE, + number=1, + message="PairwiseQuestionAnsweringQualitySpec", + ) + instance: "PairwiseQuestionAnsweringQualityInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="PairwiseQuestionAnsweringQualityInstance", + ) + + +class PairwiseQuestionAnsweringQualityInstance(proto.Message): + r"""Spec for pairwise question answering quality instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the candidate model. + + This field is a member of `oneof`_ ``_prediction``. + baseline_prediction (str): + Required. Output of the baseline model. + + This field is a member of `oneof`_ ``_baseline_prediction``. + reference (str): + Optional. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + context (str): + Required. Text to answer the question. + + This field is a member of `oneof`_ ``_context``. + instruction (str): + Required. Question Answering prompt for LLM. + + This field is a member of `oneof`_ ``_instruction``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + baseline_prediction: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + context: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + instruction: str = proto.Field( + proto.STRING, + number=5, + optional=True, + ) + + +class PairwiseQuestionAnsweringQualitySpec(proto.Message): + r"""Spec for pairwise question answering quality score metric. 
+ + Attributes: + use_reference (bool): + Optional. Whether to use instance.reference + to compute question answering quality. + version (int): + Optional. Which version to use for + evaluation. + """ + + use_reference: bool = proto.Field( + proto.BOOL, + number=1, + ) + version: int = proto.Field( + proto.INT32, + number=2, + ) + + +class PairwiseQuestionAnsweringQualityResult(proto.Message): + r"""Spec for pairwise question answering quality result. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + pairwise_choice (google.cloud.aiplatform_v1beta1.types.PairwiseChoice): + Output only. Pairwise question answering + prediction choice. + explanation (str): + Output only. Explanation for question + answering quality score. + confidence (float): + Output only. Confidence for question + answering quality score. + + This field is a member of `oneof`_ ``_confidence``. + """ + + pairwise_choice: "PairwiseChoice" = proto.Field( + proto.ENUM, + number=1, + enum="PairwiseChoice", + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class QuestionAnsweringRelevanceInput(proto.Message): + r"""Input for question answering relevance metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringRelevanceSpec): + Required. Spec for question answering + relevance score metric. + instance (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringRelevanceInstance): + Required. Question answering relevance + instance. 
+ """ + + metric_spec: "QuestionAnsweringRelevanceSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="QuestionAnsweringRelevanceSpec", + ) + instance: "QuestionAnsweringRelevanceInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="QuestionAnsweringRelevanceInstance", + ) + + +class QuestionAnsweringRelevanceInstance(proto.Message): + r"""Spec for question answering relevance instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Optional. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + context (str): + Optional. Text provided as context to answer + the question. + + This field is a member of `oneof`_ ``_context``. + instruction (str): + Required. The question asked and other + instruction in the inference prompt. + + This field is a member of `oneof`_ ``_instruction``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + context: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + instruction: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + + +class QuestionAnsweringRelevanceSpec(proto.Message): + r"""Spec for question answering relevance metric. + + Attributes: + use_reference (bool): + Optional. Whether to use instance.reference + to compute question answering relevance. + version (int): + Optional. Which version to use for + evaluation. + """ + + use_reference: bool = proto.Field( + proto.BOOL, + number=1, + ) + version: int = proto.Field( + proto.INT32, + number=2, + ) + + +class QuestionAnsweringRelevanceResult(proto.Message): + r"""Spec for question answering relevance result. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Question Answering Relevance + score. + + This field is a member of `oneof`_ ``_score``. + explanation (str): + Output only. Explanation for question + answering relevance score. + confidence (float): + Output only. Confidence for question + answering relevance score. + + This field is a member of `oneof`_ ``_confidence``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class QuestionAnsweringHelpfulnessInput(proto.Message): + r"""Input for question answering helpfulness metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringHelpfulnessSpec): + Required. Spec for question answering + helpfulness score metric. + instance (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringHelpfulnessInstance): + Required. Question answering helpfulness + instance. + """ + + metric_spec: "QuestionAnsweringHelpfulnessSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="QuestionAnsweringHelpfulnessSpec", + ) + instance: "QuestionAnsweringHelpfulnessInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="QuestionAnsweringHelpfulnessInstance", + ) + + +class QuestionAnsweringHelpfulnessInstance(proto.Message): + r"""Spec for question answering helpfulness instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Optional. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + context (str): + Optional. 
Text provided as context to answer + the question. + + This field is a member of `oneof`_ ``_context``. + instruction (str): + Required. The question asked and other + instruction in the inference prompt. + + This field is a member of `oneof`_ ``_instruction``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + context: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + instruction: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + + +class QuestionAnsweringHelpfulnessSpec(proto.Message): + r"""Spec for question answering helpfulness metric. + + Attributes: + use_reference (bool): + Optional. Whether to use instance.reference + to compute question answering helpfulness. + version (int): + Optional. Which version to use for + evaluation. + """ + + use_reference: bool = proto.Field( + proto.BOOL, + number=1, + ) + version: int = proto.Field( + proto.INT32, + number=2, + ) + + +class QuestionAnsweringHelpfulnessResult(proto.Message): + r"""Spec for question answering helpfulness result. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Question Answering Helpfulness + score. + + This field is a member of `oneof`_ ``_score``. + explanation (str): + Output only. Explanation for question + answering helpfulness score. + confidence (float): + Output only. Confidence for question + answering helpfulness score. + + This field is a member of `oneof`_ ``_confidence``. 
+ """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class QuestionAnsweringCorrectnessInput(proto.Message): + r"""Input for question answering correctness metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringCorrectnessSpec): + Required. Spec for question answering + correctness score metric. + instance (google.cloud.aiplatform_v1beta1.types.QuestionAnsweringCorrectnessInstance): + Required. Question answering correctness + instance. + """ + + metric_spec: "QuestionAnsweringCorrectnessSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="QuestionAnsweringCorrectnessSpec", + ) + instance: "QuestionAnsweringCorrectnessInstance" = proto.Field( + proto.MESSAGE, + number=2, + message="QuestionAnsweringCorrectnessInstance", + ) + + +class QuestionAnsweringCorrectnessInstance(proto.Message): + r"""Spec for question answering correctness instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Optional. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + context (str): + Optional. Text provided as context to answer + the question. + + This field is a member of `oneof`_ ``_context``. + instruction (str): + Required. The question asked and other + instruction in the inference prompt. + + This field is a member of `oneof`_ ``_instruction``. 
+ """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + context: str = proto.Field( + proto.STRING, + number=3, + optional=True, + ) + instruction: str = proto.Field( + proto.STRING, + number=4, + optional=True, + ) + + +class QuestionAnsweringCorrectnessSpec(proto.Message): + r"""Spec for question answering correctness metric. + + Attributes: + use_reference (bool): + Optional. Whether to use instance.reference + to compute question answering correctness. + version (int): + Optional. Which version to use for + evaluation. + """ + + use_reference: bool = proto.Field( + proto.BOOL, + number=1, + ) + version: int = proto.Field( + proto.INT32, + number=2, + ) + + +class QuestionAnsweringCorrectnessResult(proto.Message): + r"""Spec for question answering correctness result. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Question Answering Correctness + score. + + This field is a member of `oneof`_ ``_score``. + explanation (str): + Output only. Explanation for question + answering correctness score. + confidence (float): + Output only. Confidence for question + answering correctness score. + + This field is a member of `oneof`_ ``_confidence``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + explanation: str = proto.Field( + proto.STRING, + number=2, + ) + confidence: float = proto.Field( + proto.FLOAT, + number=3, + optional=True, + ) + + +class ToolCallValidInput(proto.Message): + r"""Input for tool call valid metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.ToolCallValidSpec): + Required. Spec for tool call valid metric. + instances (MutableSequence[google.cloud.aiplatform_v1beta1.types.ToolCallValidInstance]): + Required. Repeated tool call valid instances. 
+ """ + + metric_spec: "ToolCallValidSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="ToolCallValidSpec", + ) + instances: MutableSequence["ToolCallValidInstance"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="ToolCallValidInstance", + ) + + +class ToolCallValidSpec(proto.Message): + r"""Spec for tool call valid metric.""" + + +class ToolCallValidInstance(proto.Message): + r"""Spec for tool call valid instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Required. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +class ToolCallValidResults(proto.Message): + r"""Results for tool call valid metric. + + Attributes: + tool_call_valid_metric_values (MutableSequence[google.cloud.aiplatform_v1beta1.types.ToolCallValidMetricValue]): + Output only. Tool call valid metric values. + """ + + tool_call_valid_metric_values: MutableSequence[ + "ToolCallValidMetricValue" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ToolCallValidMetricValue", + ) + + +class ToolCallValidMetricValue(proto.Message): + r"""Tool call valid metric value for an instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Tool call valid score. + + This field is a member of `oneof`_ ``_score``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + + +class ToolNameMatchInput(proto.Message): + r"""Input for tool name match metric. 
+ + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.ToolNameMatchSpec): + Required. Spec for tool name match metric. + instances (MutableSequence[google.cloud.aiplatform_v1beta1.types.ToolNameMatchInstance]): + Required. Repeated tool name match instances. + """ + + metric_spec: "ToolNameMatchSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="ToolNameMatchSpec", + ) + instances: MutableSequence["ToolNameMatchInstance"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="ToolNameMatchInstance", + ) + + +class ToolNameMatchSpec(proto.Message): + r"""Spec for tool name match metric.""" + + +class ToolNameMatchInstance(proto.Message): + r"""Spec for tool name match instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Required. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +class ToolNameMatchResults(proto.Message): + r"""Results for tool name match metric. + + Attributes: + tool_name_match_metric_values (MutableSequence[google.cloud.aiplatform_v1beta1.types.ToolNameMatchMetricValue]): + Output only. Tool name match metric values. + """ + + tool_name_match_metric_values: MutableSequence[ + "ToolNameMatchMetricValue" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ToolNameMatchMetricValue", + ) + + +class ToolNameMatchMetricValue(proto.Message): + r"""Tool name match metric value for an instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. 
Tool name match score. + + This field is a member of `oneof`_ ``_score``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + + +class ToolParameterKeyMatchInput(proto.Message): + r"""Input for tool parameter key match metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.ToolParameterKeyMatchSpec): + Required. Spec for tool parameter key match + metric. + instances (MutableSequence[google.cloud.aiplatform_v1beta1.types.ToolParameterKeyMatchInstance]): + Required. Repeated tool parameter key match + instances. + """ + + metric_spec: "ToolParameterKeyMatchSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="ToolParameterKeyMatchSpec", + ) + instances: MutableSequence["ToolParameterKeyMatchInstance"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="ToolParameterKeyMatchInstance", + ) + + +class ToolParameterKeyMatchSpec(proto.Message): + r"""Spec for tool parameter key match metric.""" + + +class ToolParameterKeyMatchInstance(proto.Message): + r"""Spec for tool parameter key match instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. + + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Required. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +class ToolParameterKeyMatchResults(proto.Message): + r"""Results for tool parameter key match metric. + + Attributes: + tool_parameter_key_match_metric_values (MutableSequence[google.cloud.aiplatform_v1beta1.types.ToolParameterKeyMatchMetricValue]): + Output only. Tool parameter key match metric + values. 
+ """ + + tool_parameter_key_match_metric_values: MutableSequence[ + "ToolParameterKeyMatchMetricValue" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ToolParameterKeyMatchMetricValue", + ) + + +class ToolParameterKeyMatchMetricValue(proto.Message): + r"""Tool parameter key match metric value for an instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Tool parameter key match score. + + This field is a member of `oneof`_ ``_score``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + + +class ToolParameterKVMatchInput(proto.Message): + r"""Input for tool parameter key value match metric. + + Attributes: + metric_spec (google.cloud.aiplatform_v1beta1.types.ToolParameterKVMatchSpec): + Required. Spec for tool parameter key value + match metric. + instances (MutableSequence[google.cloud.aiplatform_v1beta1.types.ToolParameterKVMatchInstance]): + Required. Repeated tool parameter key value + match instances. + """ + + metric_spec: "ToolParameterKVMatchSpec" = proto.Field( + proto.MESSAGE, + number=1, + message="ToolParameterKVMatchSpec", + ) + instances: MutableSequence["ToolParameterKVMatchInstance"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="ToolParameterKVMatchInstance", + ) + + +class ToolParameterKVMatchSpec(proto.Message): + r"""Spec for tool parameter key value match metric. + + Attributes: + use_strict_string_match (bool): + Optional. Whether to use STRCIT string match + on parameter values. + """ + + use_strict_string_match: bool = proto.Field( + proto.BOOL, + number=1, + ) + + +class ToolParameterKVMatchInstance(proto.Message): + r"""Spec for tool parameter key value match instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + prediction (str): + Required. Output of the evaluated model. 
+ + This field is a member of `oneof`_ ``_prediction``. + reference (str): + Required. Ground truth used to compare + against the prediction. + + This field is a member of `oneof`_ ``_reference``. + """ + + prediction: str = proto.Field( + proto.STRING, + number=1, + optional=True, + ) + reference: str = proto.Field( + proto.STRING, + number=2, + optional=True, + ) + + +class ToolParameterKVMatchResults(proto.Message): + r"""Results for tool parameter key value match metric. + + Attributes: + tool_parameter_kv_match_metric_values (MutableSequence[google.cloud.aiplatform_v1beta1.types.ToolParameterKVMatchMetricValue]): + Output only. Tool parameter key value match + metric values. + """ + + tool_parameter_kv_match_metric_values: MutableSequence[ + "ToolParameterKVMatchMetricValue" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ToolParameterKVMatchMetricValue", + ) + + +class ToolParameterKVMatchMetricValue(proto.Message): + r"""Tool parameter key value match metric value for an instance. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + score (float): + Output only. Tool parameter key value match + score. + + This field is a member of `oneof`_ ``_score``. + """ + + score: float = proto.Field( + proto.FLOAT, + number=1, + optional=True, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/extension.py b/google/cloud/aiplatform_v1beta1/types/extension.py new file mode 100644 index 0000000000..bc021bb6fb --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/extension.py @@ -0,0 +1,665 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import tool +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "HttpElementLocation", + "AuthType", + "Extension", + "ExtensionManifest", + "ExtensionOperation", + "AuthConfig", + "RuntimeConfig", + "ExtensionPrivateServiceConnectConfig", + }, +) + + +class HttpElementLocation(proto.Enum): + r"""Enum of location an HTTP element can be. + + Values: + HTTP_IN_UNSPECIFIED (0): + No description available. + HTTP_IN_QUERY (1): + Element is in the HTTP request query. + HTTP_IN_HEADER (2): + Element is in the HTTP request header. + HTTP_IN_PATH (3): + Element is in the HTTP request path. + HTTP_IN_BODY (4): + Element is in the HTTP request body. + HTTP_IN_COOKIE (5): + Element is in the HTTP request cookie. + """ + HTTP_IN_UNSPECIFIED = 0 + HTTP_IN_QUERY = 1 + HTTP_IN_HEADER = 2 + HTTP_IN_PATH = 3 + HTTP_IN_BODY = 4 + HTTP_IN_COOKIE = 5 + + +class AuthType(proto.Enum): + r"""Type of Auth. + + Values: + AUTH_TYPE_UNSPECIFIED (0): + No description available. + NO_AUTH (1): + No Auth. + API_KEY_AUTH (2): + API Key Auth. + HTTP_BASIC_AUTH (3): + HTTP Basic Auth. + GOOGLE_SERVICE_ACCOUNT_AUTH (4): + Google Service Account Auth. + OAUTH (6): + OAuth auth. + OIDC_AUTH (8): + OpenID Connect (OIDC) Auth. 
+ """ + AUTH_TYPE_UNSPECIFIED = 0 + NO_AUTH = 1 + API_KEY_AUTH = 2 + HTTP_BASIC_AUTH = 3 + GOOGLE_SERVICE_ACCOUNT_AUTH = 4 + OAUTH = 6 + OIDC_AUTH = 8 + + +class Extension(proto.Message): + r"""Extensions are tools for large language models to access + external data, run computations, etc. + + Attributes: + name (str): + Identifier. The resource name of the + Extension. + display_name (str): + Required. The display name of the Extension. + The name can be up to 128 characters long and + can consist of any UTF-8 characters. + description (str): + Optional. The description of the Extension. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Extension + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this Extension + was most recently updated. + etag (str): + Optional. Used to perform consistent + read-modify-write updates. If not set, a blind + "overwrite" update happens. + manifest (google.cloud.aiplatform_v1beta1.types.ExtensionManifest): + Required. Manifest of the Extension. + extension_operations (MutableSequence[google.cloud.aiplatform_v1beta1.types.ExtensionOperation]): + Output only. Supported operations. + runtime_config (google.cloud.aiplatform_v1beta1.types.RuntimeConfig): + Optional. Runtime config controlling the + runtime behavior of this Extension. + tool_use_examples (MutableSequence[google.cloud.aiplatform_v1beta1.types.ToolUseExample]): + Optional. Examples to illustrate the usage of + the extension as a tool. + private_service_connect_config (google.cloud.aiplatform_v1beta1.types.ExtensionPrivateServiceConnectConfig): + Optional. The PrivateServiceConnect config + for the extension. If specified, the service + endpoints associated with the Extension should + be registered with private network access in the + provided Service Directory + (https://cloud.google.com/service-directory/docs/configuring-private-network-access). 
+ + If the service contains more than one endpoint + with a network, the service will arbitrarilty + choose one of the endpoints to use for extension + execution. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=3, + ) + description: str = proto.Field( + proto.STRING, + number=4, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + etag: str = proto.Field( + proto.STRING, + number=7, + ) + manifest: "ExtensionManifest" = proto.Field( + proto.MESSAGE, + number=9, + message="ExtensionManifest", + ) + extension_operations: MutableSequence["ExtensionOperation"] = proto.RepeatedField( + proto.MESSAGE, + number=11, + message="ExtensionOperation", + ) + runtime_config: "RuntimeConfig" = proto.Field( + proto.MESSAGE, + number=13, + message="RuntimeConfig", + ) + tool_use_examples: MutableSequence[tool.ToolUseExample] = proto.RepeatedField( + proto.MESSAGE, + number=15, + message=tool.ToolUseExample, + ) + private_service_connect_config: "ExtensionPrivateServiceConnectConfig" = ( + proto.Field( + proto.MESSAGE, + number=16, + message="ExtensionPrivateServiceConnectConfig", + ) + ) + + +class ExtensionManifest(proto.Message): + r"""Manifest spec of an Extension needed for runtime execution. + + Attributes: + name (str): + Required. Extension name shown to the LLM. + The name can be up to 128 characters long. + description (str): + Required. The natural language description + shown to the LLM. It should describe the usage + of the extension, and is essential for the LLM + to perform reasoning. + api_spec (google.cloud.aiplatform_v1beta1.types.ExtensionManifest.ApiSpec): + Required. Immutable. The API specification + shown to the LLM. 
+ auth_config (google.cloud.aiplatform_v1beta1.types.AuthConfig): + Required. Immutable. Type of auth supported + by this extension. + """ + + class ApiSpec(proto.Message): + r"""The API specification shown to the LLM. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + open_api_yaml (str): + The API spec in Open API standard and YAML + format. + + This field is a member of `oneof`_ ``api_spec``. + open_api_gcs_uri (str): + Cloud Storage URI pointing to the OpenAPI + spec. + + This field is a member of `oneof`_ ``api_spec``. + """ + + open_api_yaml: str = proto.Field( + proto.STRING, + number=1, + oneof="api_spec", + ) + open_api_gcs_uri: str = proto.Field( + proto.STRING, + number=2, + oneof="api_spec", + ) + + name: str = proto.Field( + proto.STRING, + number=1, + ) + description: str = proto.Field( + proto.STRING, + number=2, + ) + api_spec: ApiSpec = proto.Field( + proto.MESSAGE, + number=3, + message=ApiSpec, + ) + auth_config: "AuthConfig" = proto.Field( + proto.MESSAGE, + number=5, + message="AuthConfig", + ) + + +class ExtensionOperation(proto.Message): + r"""Operation of an extension. + + Attributes: + operation_id (str): + Operation ID that uniquely identifies the + operations among the extension. See: "Operation + Object" in https://swagger.io/specification/. + + This field is parsed from the OpenAPI spec. For + HTTP extensions, if it does not exist in the + spec, we will generate one from the HTTP method + and path. + function_declaration (google.cloud.aiplatform_v1beta1.types.FunctionDeclaration): + Output only. Structured representation of a + function declaration as defined by the OpenAPI + Spec. 
+ """ + + operation_id: str = proto.Field( + proto.STRING, + number=1, + ) + function_declaration: tool.FunctionDeclaration = proto.Field( + proto.MESSAGE, + number=3, + message=tool.FunctionDeclaration, + ) + + +class AuthConfig(proto.Message): + r"""Auth configuration to run the extension. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + api_key_config (google.cloud.aiplatform_v1beta1.types.AuthConfig.ApiKeyConfig): + Config for API key auth. + + This field is a member of `oneof`_ ``auth_config``. + http_basic_auth_config (google.cloud.aiplatform_v1beta1.types.AuthConfig.HttpBasicAuthConfig): + Config for HTTP Basic auth. + + This field is a member of `oneof`_ ``auth_config``. + google_service_account_config (google.cloud.aiplatform_v1beta1.types.AuthConfig.GoogleServiceAccountConfig): + Config for Google Service Account auth. + + This field is a member of `oneof`_ ``auth_config``. + oauth_config (google.cloud.aiplatform_v1beta1.types.AuthConfig.OauthConfig): + Config for user oauth. + + This field is a member of `oneof`_ ``auth_config``. + oidc_config (google.cloud.aiplatform_v1beta1.types.AuthConfig.OidcConfig): + Config for user OIDC auth. + + This field is a member of `oneof`_ ``auth_config``. + auth_type (google.cloud.aiplatform_v1beta1.types.AuthType): + Type of auth scheme. + """ + + class ApiKeyConfig(proto.Message): + r"""Config for authentication with API key. + + Attributes: + name (str): + Required. The parameter name of the API key. E.g. If the API + request is "https://example.com/act?api_key=", "api_key" + would be the parameter name. + api_key_secret (str): + Required. The name of the SecretManager secret version + resource storing the API key. 
Format: + ``projects/{project}/secrets/{secrete}/versions/{version}`` + + - If specified, the ``secretmanager.versions.access`` + permission should be granted to Vertex AI Extension + Service Agent + (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) + on the specified resource. + http_element_location (google.cloud.aiplatform_v1beta1.types.HttpElementLocation): + Required. The location of the API key. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + api_key_secret: str = proto.Field( + proto.STRING, + number=2, + ) + http_element_location: "HttpElementLocation" = proto.Field( + proto.ENUM, + number=3, + enum="HttpElementLocation", + ) + + class HttpBasicAuthConfig(proto.Message): + r"""Config for HTTP Basic Authentication. + + Attributes: + credential_secret (str): + Required. The name of the SecretManager secret version + resource storing the base64 encoded credentials. Format: + ``projects/{project}/secrets/{secrete}/versions/{version}`` + + - If specified, the ``secretmanager.versions.access`` + permission should be granted to Vertex AI Extension + Service Agent + (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) + on the specified resource. + """ + + credential_secret: str = proto.Field( + proto.STRING, + number=2, + ) + + class GoogleServiceAccountConfig(proto.Message): + r"""Config for Google Service Account Authentication. + + Attributes: + service_account (str): + Optional. The service account that the extension execution + service runs as. + + - If the service account is specified, the + ``iam.serviceAccounts.getAccessToken`` permission should + be granted to Vertex AI Extension Service Agent + (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) + on the specified service account. + + - If not specified, the Vertex AI Extension Service Agent + will be used to execute the Extension. 
+ """ + + service_account: str = proto.Field( + proto.STRING, + number=1, + ) + + class OauthConfig(proto.Message): + r"""Config for user oauth. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + access_token (str): + Access token for extension endpoint. Only used to propagate + token from [[ExecuteExtensionRequest.runtime_auth_config]] + at request time. + + This field is a member of `oneof`_ ``oauth_config``. + service_account (str): + The service account used to generate access tokens for + executing the Extension. + + - If the service account is specified, the + ``iam.serviceAccounts.getAccessToken`` permission should + be granted to Vertex AI Extension Service Agent + (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) + on the provided service account. + + This field is a member of `oneof`_ ``oauth_config``. + """ + + access_token: str = proto.Field( + proto.STRING, + number=1, + oneof="oauth_config", + ) + service_account: str = proto.Field( + proto.STRING, + number=2, + oneof="oauth_config", + ) + + class OidcConfig(proto.Message): + r"""Config for user OIDC auth. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + id_token (str): + OpenID Connect formatted ID token for extension endpoint. + Only used to propagate token from + [[ExecuteExtensionRequest.runtime_auth_config]] at request + time. + + This field is a member of `oneof`_ ``oidc_config``. 
+ service_account (str): + The service account used to generate an OpenID Connect + (OIDC)-compatible JWT token signed by the Google OIDC + Provider (accounts.google.com) for extension endpoint + (https://cloud.google.com/iam/docs/create-short-lived-credentials-direct#sa-credentials-oidc). + + - The audience for the token will be set to the URL in the + server url defined in the OpenApi spec. + + - If the service account is provided, the service account + should grant ``iam.serviceAccounts.getOpenIdToken`` + permission to Vertex AI Extension Service Agent + (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). + + This field is a member of `oneof`_ ``oidc_config``. + """ + + id_token: str = proto.Field( + proto.STRING, + number=1, + oneof="oidc_config", + ) + service_account: str = proto.Field( + proto.STRING, + number=2, + oneof="oidc_config", + ) + + api_key_config: ApiKeyConfig = proto.Field( + proto.MESSAGE, + number=2, + oneof="auth_config", + message=ApiKeyConfig, + ) + http_basic_auth_config: HttpBasicAuthConfig = proto.Field( + proto.MESSAGE, + number=3, + oneof="auth_config", + message=HttpBasicAuthConfig, + ) + google_service_account_config: GoogleServiceAccountConfig = proto.Field( + proto.MESSAGE, + number=4, + oneof="auth_config", + message=GoogleServiceAccountConfig, + ) + oauth_config: OauthConfig = proto.Field( + proto.MESSAGE, + number=5, + oneof="auth_config", + message=OauthConfig, + ) + oidc_config: OidcConfig = proto.Field( + proto.MESSAGE, + number=7, + oneof="auth_config", + message=OidcConfig, + ) + auth_type: "AuthType" = proto.Field( + proto.ENUM, + number=101, + enum="AuthType", + ) + + +class RuntimeConfig(proto.Message): + r"""Runtime configuration to run the extension. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + code_interpreter_runtime_config (google.cloud.aiplatform_v1beta1.types.RuntimeConfig.CodeInterpreterRuntimeConfig): + Code execution runtime configurations for + code interpreter extension. + + This field is a member of `oneof`_ ``GoogleFirstPartyExtensionConfig``. + vertex_ai_search_runtime_config (google.cloud.aiplatform_v1beta1.types.RuntimeConfig.VertexAISearchRuntimeConfig): + Runtime configuration for Vertext AI Search + extension. + + This field is a member of `oneof`_ ``GoogleFirstPartyExtensionConfig``. + default_params (google.protobuf.struct_pb2.Struct): + Optional. Default parameters that will be set for all the + execution of this extension. If specified, the parameter + values can be overridden by values in + [[ExecuteExtensionRequest.operation_params]] at request + time. + + The struct should be in a form of map with param name as the + key and actual param value as the value. E.g. If this + operation requires a param "name" to be set to "abc". you + can set this to something like {"name": "abc"}. + """ + + class CodeInterpreterRuntimeConfig(proto.Message): + r""" + + Attributes: + file_input_gcs_bucket (str): + Optional. The GCS bucket for file input of + this Extension. If specified, support input from + the GCS bucket. Vertex Extension Custom Code + Service Agent should be granted file reader to + this bucket. + If not specified, the extension will only accept + file contents from request body and reject GCS + file inputs. + file_output_gcs_bucket (str): + Optional. The GCS bucket for file output of + this Extension. If specified, write all output + files to the GCS bucket. Vertex Extension Custom + Code Service Agent should be granted file writer + to this bucket. + If not specified, the file content will be + output in response body. 
+ """ + + file_input_gcs_bucket: str = proto.Field( + proto.STRING, + number=1, + ) + file_output_gcs_bucket: str = proto.Field( + proto.STRING, + number=2, + ) + + class VertexAISearchRuntimeConfig(proto.Message): + r""" + + Attributes: + serving_config_name (str): + Required. Vertext AI Search serving config name. Format: + ``projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}`` + or + ``projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/servingConfigs/{serving_config}`` + """ + + serving_config_name: str = proto.Field( + proto.STRING, + number=1, + ) + + code_interpreter_runtime_config: CodeInterpreterRuntimeConfig = proto.Field( + proto.MESSAGE, + number=2, + oneof="GoogleFirstPartyExtensionConfig", + message=CodeInterpreterRuntimeConfig, + ) + vertex_ai_search_runtime_config: VertexAISearchRuntimeConfig = proto.Field( + proto.MESSAGE, + number=6, + oneof="GoogleFirstPartyExtensionConfig", + message=VertexAISearchRuntimeConfig, + ) + default_params: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Struct, + ) + + +class ExtensionPrivateServiceConnectConfig(proto.Message): + r"""PrivateExtensionConfig configuration for the extension. + + Attributes: + service_directory (str): + Required. The Service Directory resource name in which the + service endpoints associated to the extension are + registered. Format: + ``projects/{project_id}/locations/{location_id}/namespaces/{namespace_id}/services/{service_id}`` + + - The Vertex AI Extension Service Agent + (https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) + should be granted ``servicedirectory.viewer`` and + ``servicedirectory.pscAuthorizedService`` roles on the + resource. 
+ """ + + service_directory: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/extension_execution_service.py b/google/cloud/aiplatform_v1beta1/types/extension_execution_service.py new file mode 100644 index 0000000000..50f75c2aaa --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/extension_execution_service.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import content as gca_content +from google.cloud.aiplatform_v1beta1.types import extension +from google.protobuf import struct_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "ExecuteExtensionRequest", + "ExecuteExtensionResponse", + "QueryExtensionRequest", + "QueryExtensionResponse", + }, +) + + +class ExecuteExtensionRequest(proto.Message): + r"""Request message for + [ExtensionExecutionService.ExecuteExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.ExecuteExtension]. + + Attributes: + name (str): + Required. Name (identifier) of the extension; Format: + ``projects/{project}/locations/{location}/extensions/{extension}`` + operation_id (str): + Required. 
The desired ID of the operation to be executed in + this extension as defined in + [ExtensionOperation.operation_id][google.cloud.aiplatform.v1beta1.ExtensionOperation.operation_id]. + operation_params (google.protobuf.struct_pb2.Struct): + Optional. Request parameters that will be + used for executing this operation. + + The struct should be in a form of map with param + name as the key and actual param value as the + value. + E.g. If this operation requires a param "name" + to be set to "abc". you can set this to + something like {"name": "abc"}. + runtime_auth_config (google.cloud.aiplatform_v1beta1.types.AuthConfig): + Optional. Auth config provided at runtime to override the + default value in [Extension.manifest.auth_config][]. The + AuthConfig.auth_type should match the value in + [Extension.manifest.auth_config][]. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + operation_id: str = proto.Field( + proto.STRING, + number=2, + ) + operation_params: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Struct, + ) + runtime_auth_config: extension.AuthConfig = proto.Field( + proto.MESSAGE, + number=4, + message=extension.AuthConfig, + ) + + +class ExecuteExtensionResponse(proto.Message): + r"""Response message for + [ExtensionExecutionService.ExecuteExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.ExecuteExtension]. + + Attributes: + content (str): + Response content from the extension. The + content should be conformant to the + response.content schema in the extension's + manifest/OpenAPI spec. + """ + + content: str = proto.Field( + proto.STRING, + number=2, + ) + + +class QueryExtensionRequest(proto.Message): + r"""Request message for + [ExtensionExecutionService.QueryExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.QueryExtension]. + + Attributes: + name (str): + Required. 
Name (identifier) of the extension; Format: + ``projects/{project}/locations/{location}/extensions/{extension}`` + contents (MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]): + Required. The content of the current + conversation with the model. + For single-turn queries, this is a single + instance. For multi-turn queries, this is a + repeated field that contains conversation + history + latest request. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + contents: MutableSequence[gca_content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=gca_content.Content, + ) + + +class QueryExtensionResponse(proto.Message): + r"""Response message for + [ExtensionExecutionService.QueryExtension][google.cloud.aiplatform.v1beta1.ExtensionExecutionService.QueryExtension]. + + Attributes: + steps (MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]): + Steps of extension or LLM interaction, can + contain function call, function response, or + text response. The last step contains the final + response to the query. + failure_message (str): + Failure message if any. + """ + + steps: MutableSequence[gca_content.Content] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_content.Content, + ) + failure_message: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/extension_registry_service.py b/google/cloud/aiplatform_v1beta1/types/extension_registry_service.py new file mode 100644 index 0000000000..196d1a4be4 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/extension_registry_service.py @@ -0,0 +1,226 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import extension as gca_extension +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "ImportExtensionRequest", + "ImportExtensionOperationMetadata", + "GetExtensionRequest", + "UpdateExtensionRequest", + "ListExtensionsRequest", + "ListExtensionsResponse", + "DeleteExtensionRequest", + }, +) + + +class ImportExtensionRequest(proto.Message): + r"""Request message for + [ExtensionRegistryService.ImportExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ImportExtension]. + + Attributes: + parent (str): + Required. The resource name of the Location to import the + Extension in. Format: + ``projects/{project}/locations/{location}`` + extension (google.cloud.aiplatform_v1beta1.types.Extension): + Required. The Extension to import. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + extension: gca_extension.Extension = proto.Field( + proto.MESSAGE, + number=2, + message=gca_extension.Extension, + ) + + +class ImportExtensionOperationMetadata(proto.Message): + r"""Details of + [ExtensionRegistryService.ImportExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ImportExtension] + operation. 
+ + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The common part of the operation metadata. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetExtensionRequest(proto.Message): + r"""Request message for + [ExtensionRegistryService.GetExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.GetExtension]. + + Attributes: + name (str): + Required. The name of the Extension resource. Format: + ``projects/{project}/locations/{location}/extensions/{extension}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UpdateExtensionRequest(proto.Message): + r"""Request message for + [ExtensionRegistryService.UpdateExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.UpdateExtension]. + + Attributes: + extension (google.cloud.aiplatform_v1beta1.types.Extension): + Required. The Extension which replaces the + resource on the server. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. Mask specifying which fields to update. Supported + fields: + + :: + + * `display_name` + * `description` + * `tool_use_examples` + """ + + extension: gca_extension.Extension = proto.Field( + proto.MESSAGE, + number=1, + message=gca_extension.Extension, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class ListExtensionsRequest(proto.Message): + r"""Request message for + [ExtensionRegistryService.ListExtensions][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ListExtensions]. + + Attributes: + parent (str): + Required. The resource name of the Location to list the + Extensions from. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Optional. The standard list filter. 
Supported fields: \* + ``display_name`` \* ``create_time`` \* ``update_time`` + + More detail in `AIP-160 `__. + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. + order_by (str): + Optional. A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + + Example: ``display_name, create_time desc``. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=2, + ) + page_size: int = proto.Field( + proto.INT32, + number=3, + ) + page_token: str = proto.Field( + proto.STRING, + number=4, + ) + order_by: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListExtensionsResponse(proto.Message): + r"""Response message for + [ExtensionRegistryService.ListExtensions][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ListExtensions] + + Attributes: + extensions (MutableSequence[google.cloud.aiplatform_v1beta1.types.Extension]): + List of Extension in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListExtensionsRequest.page_token][google.cloud.aiplatform.v1beta1.ListExtensionsRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + extensions: MutableSequence[gca_extension.Extension] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_extension.Extension, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteExtensionRequest(proto.Message): + r"""Request message for + [ExtensionRegistryService.DeleteExtension][google.cloud.aiplatform.v1beta1.ExtensionRegistryService.DeleteExtension]. + + Attributes: + name (str): + Required. The name of the Extension resource to be deleted. 
+ Format: + ``projects/{project}/locations/{location}/extensions/{extension}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/feature_online_store.py b/google/cloud/aiplatform_v1beta1/types/feature_online_store.py index bd0b0fb3d9..ef37abc9d7 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_online_store.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_online_store.py @@ -97,8 +97,10 @@ class FeatureOnlineStore(proto.Message): this FeatureOnlineStore, which is different from common Vertex service endpoint. embedding_management (google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore.EmbeddingManagement): - Optional. The settings for embedding - management in FeatureOnlineStore. + Optional. Deprecated: This field is no longer + needed anymore and embedding management is + automatically enabled when specifying Optimized + storage type. """ class State(proto.Enum): @@ -180,12 +182,9 @@ class Optimized(proto.Message): r"""Optimized storage type""" class DedicatedServingEndpoint(proto.Message): - r"""The dedicated serving endpoint for this FeatureOnlineStore. Only - need to set when you choose Optimized storage type or enable - EmbeddingManagement. Will use public endpoint by default. Note, for - EmbeddingManagement use case, only - [DedicatedServingEndpoint.public_endpoint_domain_name] is available - now. + r"""The dedicated serving endpoint for this FeatureOnlineStore. + Only need to set when you choose Optimized storage type. Public + endpoint is provisioned by default. Attributes: public_endpoint_domain_name (str): @@ -222,7 +221,10 @@ class DedicatedServingEndpoint(proto.Message): ) class EmbeddingManagement(proto.Message): - r"""Contains settings for embedding management. + r"""Deprecated: This sub message is no longer needed anymore and + embedding management is automatically enabled when specifying + Optimized storage type. 
Contains settings for embedding + management. Attributes: enabled (bool): diff --git a/google/cloud/aiplatform_v1beta1/types/feature_view.py b/google/cloud/aiplatform_v1beta1/types/feature_view.py index 9c2ab71815..bc2f15ee1f 100644 --- a/google/cloud/aiplatform_v1beta1/types/feature_view.py +++ b/google/cloud/aiplatform_v1beta1/types/feature_view.py @@ -92,6 +92,13 @@ class FeatureView(proto.Message): Optional. Deprecated: please use [FeatureView.index_config][google.cloud.aiplatform.v1beta1.FeatureView.index_config] instead. + index_config (google.cloud.aiplatform_v1beta1.types.FeatureView.IndexConfig): + Optional. Configuration for index preparation + for vector search. It contains the required + configurations to create an index from source + data, so that approximate nearest neighbor + (a.k.a ANN) algorithms search can be performed + during online serving. service_agent_type (google.cloud.aiplatform_v1beta1.types.FeatureView.ServiceAgentType): Optional. Service agent type used during data sync. By default, the Vertex AI Service Agent is used. When using an @@ -211,9 +218,14 @@ class VectorSearchConfig(proto.Message): crowding_column (str): Optional. Column of crowding. This column contains crowding attribute which is a constraint on a neighbor list produced - by nearest neighbor search requiring that no more than some - value k' of the k neighbors returned have the same value of - crowding_attribute. + by + [FeatureOnlineStoreService.SearchNearestEntities][google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.SearchNearestEntities] + to diversify search results. If + [NearestNeighborQuery.per_crowding_attribute_neighbor_count][google.cloud.aiplatform.v1beta1.NearestNeighborQuery.per_crowding_attribute_neighbor_count] + is set to K in + [SearchNearestEntitiesRequest][google.cloud.aiplatform.v1beta1.SearchNearestEntitiesRequest], + it's guaranteed that no more than K entities of the same + crowding attribute are returned in the response. 
embedding_dimension (int): Optional. The number of dimensions of the input embedding. @@ -310,6 +322,147 @@ class TreeAHConfig(proto.Message): ) ) + class IndexConfig(proto.Message): + r"""Configuration for vector indexing. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + tree_ah_config (google.cloud.aiplatform_v1beta1.types.FeatureView.IndexConfig.TreeAHConfig): + Optional. Configuration options for the + tree-AH algorithm (Shallow tree + + Asymmetric Hashing). Please refer to this + paper for more details: + + https://arxiv.org/abs/1908.10396 + + This field is a member of `oneof`_ ``algorithm_config``. + brute_force_config (google.cloud.aiplatform_v1beta1.types.FeatureView.IndexConfig.BruteForceConfig): + Optional. Configuration options for using + brute force search, which simply implements the + standard linear search in the database for each + query. It is primarily meant for benchmarking + and to generate the ground truth for approximate + search. + + This field is a member of `oneof`_ ``algorithm_config``. + embedding_column (str): + Optional. Column of embedding. This column contains the + source data to create index for vector search. + embedding_column must be set when using vector search. + filter_columns (MutableSequence[str]): + Optional. Columns of features that're used to + filter vector search results. + crowding_column (str): + Optional. Column of crowding. This column contains crowding + attribute which is a constraint on a neighbor list produced + by + [FeatureOnlineStoreService.SearchNearestEntities][google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.SearchNearestEntities] + to diversify search results. 
If + [NearestNeighborQuery.per_crowding_attribute_neighbor_count][google.cloud.aiplatform.v1beta1.NearestNeighborQuery.per_crowding_attribute_neighbor_count] + is set to K in + [SearchNearestEntitiesRequest][google.cloud.aiplatform.v1beta1.SearchNearestEntitiesRequest], + it's guaranteed that no more than K entities of the same + crowding attribute are returned in the response. + embedding_dimension (int): + Optional. The number of dimensions of the + input embedding. + + This field is a member of `oneof`_ ``_embedding_dimension``. + distance_measure_type (google.cloud.aiplatform_v1beta1.types.FeatureView.IndexConfig.DistanceMeasureType): + Optional. The distance measure used in + nearest neighbor search. + """ + + class DistanceMeasureType(proto.Enum): + r"""The distance measure used in nearest neighbor search. + + Values: + DISTANCE_MEASURE_TYPE_UNSPECIFIED (0): + Should not be set. + SQUARED_L2_DISTANCE (1): + Euclidean (L_2) Distance. + COSINE_DISTANCE (2): + Cosine Distance. Defined as 1 - cosine similarity. + + We strongly suggest using DOT_PRODUCT_DISTANCE + + UNIT_L2_NORM instead of COSINE distance. Our algorithms have + been more optimized for DOT_PRODUCT distance which, when + combined with UNIT_L2_NORM, is mathematically equivalent to + COSINE distance and results in the same ranking. + DOT_PRODUCT_DISTANCE (3): + Dot Product Distance. Defined as a negative + of the dot product. + """ + DISTANCE_MEASURE_TYPE_UNSPECIFIED = 0 + SQUARED_L2_DISTANCE = 1 + COSINE_DISTANCE = 2 + DOT_PRODUCT_DISTANCE = 3 + + class BruteForceConfig(proto.Message): + r"""Configuration options for using brute force search.""" + + class TreeAHConfig(proto.Message): + r"""Configuration options for the tree-AH algorithm. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + leaf_node_embedding_count (int): + Optional. Number of embeddings on each leaf + node. The default value is 1000 if not set. 
+ + This field is a member of `oneof`_ ``_leaf_node_embedding_count``. + """ + + leaf_node_embedding_count: int = proto.Field( + proto.INT64, + number=1, + optional=True, + ) + + tree_ah_config: "FeatureView.IndexConfig.TreeAHConfig" = proto.Field( + proto.MESSAGE, + number=6, + oneof="algorithm_config", + message="FeatureView.IndexConfig.TreeAHConfig", + ) + brute_force_config: "FeatureView.IndexConfig.BruteForceConfig" = proto.Field( + proto.MESSAGE, + number=7, + oneof="algorithm_config", + message="FeatureView.IndexConfig.BruteForceConfig", + ) + embedding_column: str = proto.Field( + proto.STRING, + number=1, + ) + filter_columns: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=2, + ) + crowding_column: str = proto.Field( + proto.STRING, + number=3, + ) + embedding_dimension: int = proto.Field( + proto.INT32, + number=4, + optional=True, + ) + distance_measure_type: "FeatureView.IndexConfig.DistanceMeasureType" = ( + proto.Field( + proto.ENUM, + number=5, + enum="FeatureView.IndexConfig.DistanceMeasureType", + ) + ) + class FeatureRegistrySource(proto.Message): r"""A Feature Registry source for features that need to be synced to Online Store. 
@@ -407,6 +560,11 @@ class FeatureGroup(proto.Message): number=8, message=VectorSearchConfig, ) + index_config: IndexConfig = proto.Field( + proto.MESSAGE, + number=15, + message=IndexConfig, + ) service_agent_type: ServiceAgentType = proto.Field( proto.ENUM, number=14, diff --git a/google/cloud/aiplatform_v1beta1/types/io.py b/google/cloud/aiplatform_v1beta1/types/io.py index cfb13c024e..53cc33df51 100644 --- a/google/cloud/aiplatform_v1beta1/types/io.py +++ b/google/cloud/aiplatform_v1beta1/types/io.py @@ -32,6 +32,8 @@ "CsvDestination", "TFRecordDestination", "ContainerRegistryDestination", + "GoogleDriveSource", + "DirectUploadSource", }, ) @@ -199,4 +201,63 @@ class ContainerRegistryDestination(proto.Message): ) +class GoogleDriveSource(proto.Message): + r"""The Google Drive location for the input content. + + Attributes: + resource_ids (MutableSequence[google.cloud.aiplatform_v1beta1.types.GoogleDriveSource.ResourceId]): + Required. Google Drive resource IDs. + """ + + class ResourceId(proto.Message): + r"""The type and ID of the Google Drive resource. + + Attributes: + resource_type (google.cloud.aiplatform_v1beta1.types.GoogleDriveSource.ResourceId.ResourceType): + Required. The type of the Google Drive + resource. + resource_id (str): + Required. The ID of the Google Drive + resource. + """ + + class ResourceType(proto.Enum): + r"""The type of the Google Drive resource. + + Values: + RESOURCE_TYPE_UNSPECIFIED (0): + Unspecified resource type. + RESOURCE_TYPE_FILE (1): + File resource type. + RESOURCE_TYPE_FOLDER (2): + Folder resource type. 
+ """ + RESOURCE_TYPE_UNSPECIFIED = 0 + RESOURCE_TYPE_FILE = 1 + RESOURCE_TYPE_FOLDER = 2 + + resource_type: "GoogleDriveSource.ResourceId.ResourceType" = proto.Field( + proto.ENUM, + number=1, + enum="GoogleDriveSource.ResourceId.ResourceType", + ) + resource_id: str = proto.Field( + proto.STRING, + number=2, + ) + + resource_ids: MutableSequence[ResourceId] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=ResourceId, + ) + + +class DirectUploadSource(proto.Message): + r"""The input content is encapsulated and uploaded in the + request. + + """ + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/machine_resources.py b/google/cloud/aiplatform_v1beta1/types/machine_resources.py index c0c1f53c9f..2012c50cff 100644 --- a/google/cloud/aiplatform_v1beta1/types/machine_resources.py +++ b/google/cloud/aiplatform_v1beta1/types/machine_resources.py @@ -36,6 +36,7 @@ "PersistentDiskSpec", "NfsMount", "AutoscalingMetricSpec", + "ShieldedVmConfig", }, ) @@ -389,4 +390,27 @@ class AutoscalingMetricSpec(proto.Message): ) +class ShieldedVmConfig(proto.Message): + r"""A set of Shielded Instance options. See `Images using supported + Shielded VM + features `__. + + Attributes: + enable_secure_boot (bool): + Defines whether the instance has `Secure + Boot `__ + enabled. + + Secure Boot helps ensure that the system only runs authentic + software by verifying the digital signature of all boot + components, and halting the boot process if signature + verification fails. 
+ """ + + enable_secure_boot: bool = proto.Field( + proto.BOOL, + number=1, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/network_spec.py b/google/cloud/aiplatform_v1beta1/types/network_spec.py new file mode 100644 index 0000000000..62bb139ad5 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/network_spec.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "NetworkSpec", + }, +) + + +class NetworkSpec(proto.Message): + r"""Network spec. + + Attributes: + enable_internet_access (bool): + Whether to enable public internet access. + Default false. + network (str): + The full name of the Google Compute Engine + `network `__ + subnetwork (str): + The name of the subnet that this instance is in. 
Format: + ``projects/{project_id_or_number}/regions/{region}/subnetworks/{subnetwork_id}`` + """ + + enable_internet_access: bool = proto.Field( + proto.BOOL, + number=1, + ) + network: str = proto.Field( + proto.STRING, + number=2, + ) + subnetwork: str = proto.Field( + proto.STRING, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/notebook_euc_config.py b/google/cloud/aiplatform_v1beta1/types/notebook_euc_config.py new file mode 100644 index 0000000000..94e9996c97 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/notebook_euc_config.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "NotebookEucConfig", + }, +) + + +class NotebookEucConfig(proto.Message): + r"""The euc configuration of NotebookRuntimeTemplate. + + Attributes: + euc_disabled (bool): + Input only. Whether EUC is disabled in this + NotebookRuntimeTemplate. In proto3, the default + value of a boolean is false. In this way, by + default EUC will be enabled for + NotebookRuntimeTemplate. + bypass_actas_check (bool): + Output only. Whether ActAs check is bypassed + for service account attached to the VM. 
If + false, we need ActAs check for the default + Compute Engine Service account. When a Runtime + is created, a VM is allocated using Default + Compute Engine Service Account. Any user + requesting to use this Runtime requires Service + Account User (ActAs) permission over this SA. If + true, Runtime owner is using EUC and does not + require the above permission as VM no longer use + default Compute Engine SA, but a P4SA. + """ + + euc_disabled: bool = proto.Field( + proto.BOOL, + number=1, + ) + bypass_actas_check: bool = proto.Field( + proto.BOOL, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/notebook_idle_shutdown_config.py b/google/cloud/aiplatform_v1beta1/types/notebook_idle_shutdown_config.py new file mode 100644 index 0000000000..9ba065b84e --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/notebook_idle_shutdown_config.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "NotebookIdleShutdownConfig", + }, +) + + +class NotebookIdleShutdownConfig(proto.Message): + r"""The idle shutdown configuration of NotebookRuntimeTemplate, which + contains the idle_timeout as required field. + + Attributes: + idle_timeout (google.protobuf.duration_pb2.Duration): + Required. Duration is accurate to the second. In Notebook, + Idle Timeout is accurate to minute so the range of + idle_timeout (second) is: 10 \* 60 ~ 1440 + + - + + 60. + idle_shutdown_disabled (bool): + Whether Idle Shutdown is disabled in this + NotebookRuntimeTemplate. + """ + + idle_timeout: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + idle_shutdown_disabled: bool = proto.Field( + proto.BOOL, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/notebook_runtime.py b/google/cloud/aiplatform_v1beta1/types/notebook_runtime.py new file mode 100644 index 0000000000..044e0722ab --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/notebook_runtime.py @@ -0,0 +1,443 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import network_spec as gca_network_spec +from google.cloud.aiplatform_v1beta1.types import notebook_euc_config +from google.cloud.aiplatform_v1beta1.types import notebook_idle_shutdown_config +from google.cloud.aiplatform_v1beta1.types import ( + notebook_runtime_template_ref as gca_notebook_runtime_template_ref, +) +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "NotebookRuntimeType", + "NotebookRuntimeTemplate", + "NotebookRuntime", + }, +) + + +class NotebookRuntimeType(proto.Enum): + r"""Represents a notebook runtime type. + + Values: + NOTEBOOK_RUNTIME_TYPE_UNSPECIFIED (0): + Unspecified notebook runtime type, NotebookRuntimeType will + default to USER_DEFINED. + USER_DEFINED (1): + runtime or template with coustomized + configurations from user. + ONE_CLICK (2): + runtime or template with system defined + configurations. + """ + NOTEBOOK_RUNTIME_TYPE_UNSPECIFIED = 0 + USER_DEFINED = 1 + ONE_CLICK = 2 + + +class NotebookRuntimeTemplate(proto.Message): + r"""A template that specifies runtime configurations such as + machine type, runtime version, network configurations, etc. + Multiple runtimes can be created from a runtime template. + + Attributes: + name (str): + Output only. The resource name of the + NotebookRuntimeTemplate. + display_name (str): + Required. The display name of the + NotebookRuntimeTemplate. The name can be up to + 128 characters long and can consist of any UTF-8 + characters. + description (str): + The description of the + NotebookRuntimeTemplate. + is_default (bool): + Output only. The default template to use if + not specified. 
+ machine_spec (google.cloud.aiplatform_v1beta1.types.MachineSpec): + Optional. Immutable. The specification of a + single machine for the template. + data_persistent_disk_spec (google.cloud.aiplatform_v1beta1.types.PersistentDiskSpec): + Optional. The specification of [persistent + disk][https://cloud.google.com/compute/docs/disks/persistent-disks] + attached to the runtime as data disk storage. + network_spec (google.cloud.aiplatform_v1beta1.types.NetworkSpec): + Optional. Network spec. + service_account (str): + The service account that the runtime workload runs as. You + can use any service account within the same project, but you + must have the service account user permission to use the + instance. + + If not specified, the `Compute Engine default service + account `__ + is used. + etag (str): + Used to perform consistent read-modify-write + updates. If not set, a blind "overwrite" update + happens. + labels (MutableMapping[str, str]): + The labels with user-defined metadata to + organize the NotebookRuntimeTemplates. + + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + + See https://goo.gl/xmQnxf for more information + and examples of labels. + idle_shutdown_config (google.cloud.aiplatform_v1beta1.types.NotebookIdleShutdownConfig): + The idle shutdown configuration of + NotebookRuntimeTemplate. This config will only + be set when idle shutdown is enabled. + euc_config (google.cloud.aiplatform_v1beta1.types.NotebookEucConfig): + EUC configuration of the + NotebookRuntimeTemplate. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + NotebookRuntimeTemplate was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + NotebookRuntimeTemplate was most recently + updated. 
+ notebook_runtime_type (google.cloud.aiplatform_v1beta1.types.NotebookRuntimeType): + Optional. Immutable. The type of the notebook + runtime template. + shielded_vm_config (google.cloud.aiplatform_v1beta1.types.ShieldedVmConfig): + Optional. Immutable. Runtime Shielded VM + spec. + network_tags (MutableSequence[str]): + Optional. The Compute Engine tags to add to runtime (see + `Tagging + instances `__). + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + description: str = proto.Field( + proto.STRING, + number=3, + ) + is_default: bool = proto.Field( + proto.BOOL, + number=4, + ) + machine_spec: machine_resources.MachineSpec = proto.Field( + proto.MESSAGE, + number=5, + message=machine_resources.MachineSpec, + ) + data_persistent_disk_spec: machine_resources.PersistentDiskSpec = proto.Field( + proto.MESSAGE, + number=8, + message=machine_resources.PersistentDiskSpec, + ) + network_spec: gca_network_spec.NetworkSpec = proto.Field( + proto.MESSAGE, + number=12, + message=gca_network_spec.NetworkSpec, + ) + service_account: str = proto.Field( + proto.STRING, + number=13, + ) + etag: str = proto.Field( + proto.STRING, + number=14, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=15, + ) + idle_shutdown_config: notebook_idle_shutdown_config.NotebookIdleShutdownConfig = ( + proto.Field( + proto.MESSAGE, + number=17, + message=notebook_idle_shutdown_config.NotebookIdleShutdownConfig, + ) + ) + euc_config: notebook_euc_config.NotebookEucConfig = proto.Field( + proto.MESSAGE, + number=18, + message=notebook_euc_config.NotebookEucConfig, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=10, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=11, + message=timestamp_pb2.Timestamp, + ) + notebook_runtime_type: "NotebookRuntimeType" = 
proto.Field( + proto.ENUM, + number=19, + enum="NotebookRuntimeType", + ) + shielded_vm_config: machine_resources.ShieldedVmConfig = proto.Field( + proto.MESSAGE, + number=20, + message=machine_resources.ShieldedVmConfig, + ) + network_tags: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=21, + ) + + +class NotebookRuntime(proto.Message): + r"""A runtime is a virtual machine allocated to a particular user + for a particular Notebook file on temporary basis with lifetime + limited to 24 hours. + + Attributes: + name (str): + Output only. The resource name of the + NotebookRuntime. + runtime_user (str): + Required. The user email of the + NotebookRuntime. + notebook_runtime_template_ref (google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplateRef): + Output only. The pointer to + NotebookRuntimeTemplate this NotebookRuntime is + created from. + proxy_uri (str): + Output only. The proxy endpoint used to + access the NotebookRuntime. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + NotebookRuntime was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + NotebookRuntime was most recently updated. + health_state (google.cloud.aiplatform_v1beta1.types.NotebookRuntime.HealthState): + Output only. The health state of the + NotebookRuntime. + display_name (str): + Required. The display name of the + NotebookRuntime. The name can be up to 128 + characters long and can consist of any UTF-8 + characters. + description (str): + The description of the NotebookRuntime. + service_account (str): + Output only. The service account that the + NotebookRuntime workload runs as. + runtime_state (google.cloud.aiplatform_v1beta1.types.NotebookRuntime.RuntimeState): + Output only. The runtime (instance) state of + the NotebookRuntime. + is_upgradable (bool): + Output only. Whether NotebookRuntime is + upgradable. 
+ labels (MutableMapping[str, str]): + The labels with user-defined metadata to organize your + NotebookRuntime. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. No more than 64 user labels can be + associated with one NotebookRuntime (System labels are + excluded). + + See https://goo.gl/xmQnxf for more information and examples + of labels. System reserved label keys are prefixed with + "aiplatform.googleapis.com/" and are immutable. Following + system labels exist for NotebookRuntime: + + - "aiplatform.googleapis.com/notebook_runtime_gce_instance_id": + output only, its value is the Compute Engine instance id. + - "aiplatform.googleapis.com/colab_enterprise_entry_service": + its value is either "bigquery" or "vertex"; if absent, it + should be "vertex". This is to describe the entry + service, either BigQuery or Vertex. + expiration_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this + NotebookRuntime will be expired: + + 1. System Predefined NotebookRuntime: 24 hours + after creation. After expiration, system + predifined runtime will be deleted. + 2. User created NotebookRuntime: 6 months after + last upgrade. After expiration, user created + runtime will be stopped and allowed for + upgrade. + version (str): + Output only. The VM os image version of + NotebookRuntime. + notebook_runtime_type (google.cloud.aiplatform_v1beta1.types.NotebookRuntimeType): + Output only. The type of the notebook + runtime. + network_tags (MutableSequence[str]): + Optional. The Compute Engine tags to add to runtime (see + `Tagging + instances `__). + """ + + class HealthState(proto.Enum): + r"""The substate of the NotebookRuntime to display health + information. + + Values: + HEALTH_STATE_UNSPECIFIED (0): + Unspecified health state. + HEALTHY (1): + NotebookRuntime is in healthy state. 
Applies + to ACTIVE state. + UNHEALTHY (2): + NotebookRuntime is in unhealthy state. + Applies to ACTIVE state. + """ + HEALTH_STATE_UNSPECIFIED = 0 + HEALTHY = 1 + UNHEALTHY = 2 + + class RuntimeState(proto.Enum): + r"""The substate of the NotebookRuntime to display state of + runtime. The resource of NotebookRuntime is in ACTIVE state for + these sub state. + + Values: + RUNTIME_STATE_UNSPECIFIED (0): + Unspecified runtime state. + RUNNING (1): + NotebookRuntime is in running state. + BEING_STARTED (2): + NotebookRuntime is in starting state. + BEING_STOPPED (3): + NotebookRuntime is in stopping state. + STOPPED (4): + NotebookRuntime is in stopped state. + BEING_UPGRADED (5): + NotebookRuntime is in upgrading state. It is + in the middle of upgrading process. + ERROR (100): + NotebookRuntime was unable to start/stop + properly. + INVALID (101): + NotebookRuntime is in invalid state. Cannot + be recovered. + """ + RUNTIME_STATE_UNSPECIFIED = 0 + RUNNING = 1 + BEING_STARTED = 2 + BEING_STOPPED = 3 + STOPPED = 4 + BEING_UPGRADED = 5 + ERROR = 100 + INVALID = 101 + + name: str = proto.Field( + proto.STRING, + number=1, + ) + runtime_user: str = proto.Field( + proto.STRING, + number=2, + ) + notebook_runtime_template_ref: gca_notebook_runtime_template_ref.NotebookRuntimeTemplateRef = proto.Field( + proto.MESSAGE, + number=3, + message=gca_notebook_runtime_template_ref.NotebookRuntimeTemplateRef, + ) + proxy_uri: str = proto.Field( + proto.STRING, + number=5, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + health_state: HealthState = proto.Field( + proto.ENUM, + number=8, + enum=HealthState, + ) + display_name: str = proto.Field( + proto.STRING, + number=10, + ) + description: str = proto.Field( + proto.STRING, + number=11, + ) + service_account: str = proto.Field( + 
proto.STRING, + number=13, + ) + runtime_state: RuntimeState = proto.Field( + proto.ENUM, + number=14, + enum=RuntimeState, + ) + is_upgradable: bool = proto.Field( + proto.BOOL, + number=15, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=16, + ) + expiration_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=17, + message=timestamp_pb2.Timestamp, + ) + version: str = proto.Field( + proto.STRING, + number=18, + ) + notebook_runtime_type: "NotebookRuntimeType" = proto.Field( + proto.ENUM, + number=19, + enum="NotebookRuntimeType", + ) + network_tags: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=25, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/notebook_runtime_template_ref.py b/google/cloud/aiplatform_v1beta1/types/notebook_runtime_template_ref.py new file mode 100644 index 0000000000..e60671be25 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/notebook_runtime_template_ref.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "NotebookRuntimeTemplateRef", + }, +) + + +class NotebookRuntimeTemplateRef(proto.Message): + r"""Points to a NotebookRuntimeTemplateRef. + + Attributes: + notebook_runtime_template (str): + Immutable. A resource name of the + NotebookRuntimeTemplate. + """ + + notebook_runtime_template: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/notebook_service.py b/google/cloud/aiplatform_v1beta1/types/notebook_service.py new file mode 100644 index 0000000000..68620b3dd9 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/notebook_service.py @@ -0,0 +1,596 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import ( + notebook_runtime as gca_notebook_runtime, +) +from google.cloud.aiplatform_v1beta1.types import operation +from google.protobuf import field_mask_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "CreateNotebookRuntimeTemplateRequest", + "CreateNotebookRuntimeTemplateOperationMetadata", + "GetNotebookRuntimeTemplateRequest", + "ListNotebookRuntimeTemplatesRequest", + "ListNotebookRuntimeTemplatesResponse", + "DeleteNotebookRuntimeTemplateRequest", + "AssignNotebookRuntimeRequest", + "AssignNotebookRuntimeOperationMetadata", + "GetNotebookRuntimeRequest", + "ListNotebookRuntimesRequest", + "ListNotebookRuntimesResponse", + "DeleteNotebookRuntimeRequest", + "UpgradeNotebookRuntimeRequest", + "UpgradeNotebookRuntimeOperationMetadata", + "UpgradeNotebookRuntimeResponse", + "StartNotebookRuntimeRequest", + "StartNotebookRuntimeOperationMetadata", + "StartNotebookRuntimeResponse", + }, +) + + +class CreateNotebookRuntimeTemplateRequest(proto.Message): + r"""Request message for + [NotebookService.CreateNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.CreateNotebookRuntimeTemplate]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + NotebookRuntimeTemplate. Format: + ``projects/{project}/locations/{location}`` + notebook_runtime_template (google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplate): + Required. The NotebookRuntimeTemplate to + create. + notebook_runtime_template_id (str): + Optional. User specified ID for the notebook + runtime template. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + notebook_runtime_template: gca_notebook_runtime.NotebookRuntimeTemplate = ( + proto.Field( + proto.MESSAGE, + number=2, + message=gca_notebook_runtime.NotebookRuntimeTemplate, + ) + ) + notebook_runtime_template_id: str = proto.Field( + proto.STRING, + number=3, + ) + + +class CreateNotebookRuntimeTemplateOperationMetadata(proto.Message): + r"""Metadata information for + [NotebookService.CreateNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.CreateNotebookRuntimeTemplate]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class GetNotebookRuntimeTemplateRequest(proto.Message): + r"""Request message for + [NotebookService.GetNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.GetNotebookRuntimeTemplate] + + Attributes: + name (str): + Required. The name of the NotebookRuntimeTemplate resource. + Format: + ``projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListNotebookRuntimeTemplatesRequest(proto.Message): + r"""Request message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimeTemplates]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the NotebookRuntimeTemplates. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Optional. An expression for filtering the results of the + request. For field names both snake_case and camelCase are + supported. + + - ``notebookRuntimeTemplate`` supports = and !=. 
+ ``notebookRuntimeTemplate`` represents the + NotebookRuntimeTemplate ID, i.e. the last segment of the + NotebookRuntimeTemplate's [resource name] + [google.cloud.aiplatform.v1beta1.NotebookRuntimeTemplate.name]. + - ``display_name`` supports = and != + - ``labels`` supports general map functions that is: + + - ``labels.key=value`` - key:value equality + - \`labels.key:\* or labels:key - key existence + - A key including a space must be quoted. + ``labels."a key"``. + + - ``notebookRuntimeType`` supports = and !=. + notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. + + Some examples: + + - ``notebookRuntimeTemplate=notebookRuntimeTemplate123`` + - ``displayName="myDisplayName"`` + - ``labels.myKey="myValue"`` + - ``notebookRuntimeType=USER_DEFINED`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via + [ListNotebookRuntimeTemplatesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListNotebookRuntimeTemplatesResponse.next_page_token] + of the previous + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimeTemplates] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Mask specifying which fields to + read. + order_by (str): + Optional. A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + + Example: ``display_name, create_time desc``. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=2, + ) + page_size: int = proto.Field( + proto.INT32, + number=3, + ) + page_token: str = proto.Field( + proto.STRING, + number=4, + ) + read_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListNotebookRuntimeTemplatesResponse(proto.Message): + r"""Response message for + [NotebookService.ListNotebookRuntimeTemplates][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimeTemplates]. + + Attributes: + notebook_runtime_templates (MutableSequence[google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplate]): + List of NotebookRuntimeTemplates in the + requested page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListNotebookRuntimeTemplatesRequest.page_token][google.cloud.aiplatform.v1beta1.ListNotebookRuntimeTemplatesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + notebook_runtime_templates: MutableSequence[ + gca_notebook_runtime.NotebookRuntimeTemplate + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_notebook_runtime.NotebookRuntimeTemplate, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteNotebookRuntimeTemplateRequest(proto.Message): + r"""Request message for + [NotebookService.DeleteNotebookRuntimeTemplate][google.cloud.aiplatform.v1beta1.NotebookService.DeleteNotebookRuntimeTemplate]. + + Attributes: + name (str): + Required. The name of the NotebookRuntimeTemplate resource + to be deleted. 
Format: + ``projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class AssignNotebookRuntimeRequest(proto.Message): + r"""Request message for + [NotebookService.AssignNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.AssignNotebookRuntime]. + + Attributes: + parent (str): + Required. The resource name of the Location to get the + NotebookRuntime assignment. Format: + ``projects/{project}/locations/{location}`` + notebook_runtime_template (str): + Required. The resource name of the + NotebookRuntimeTemplate based on which a + NotebookRuntime will be assigned (reuse or + create a new one). + notebook_runtime (google.cloud.aiplatform_v1beta1.types.NotebookRuntime): + Required. Provide runtime specific + information (e.g. runtime owner, notebook id) + used for NotebookRuntime assignment. + notebook_runtime_id (str): + Optional. User specified ID for the notebook + runtime. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + notebook_runtime_template: str = proto.Field( + proto.STRING, + number=2, + ) + notebook_runtime: gca_notebook_runtime.NotebookRuntime = proto.Field( + proto.MESSAGE, + number=3, + message=gca_notebook_runtime.NotebookRuntime, + ) + notebook_runtime_id: str = proto.Field( + proto.STRING, + number=4, + ) + + +class AssignNotebookRuntimeOperationMetadata(proto.Message): + r"""Metadata information for + [NotebookService.AssignNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.AssignNotebookRuntime]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + progress_message (str): + A human-readable message that shows the + intermediate progress details of + NotebookRuntime. 
+ """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + progress_message: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetNotebookRuntimeRequest(proto.Message): + r"""Request message for + [NotebookService.GetNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.GetNotebookRuntime] + + Attributes: + name (str): + Required. The name of the NotebookRuntime + resource. Instead of checking whether the name + is in valid NotebookRuntime resource name + format, directly throw NotFound exception if + there is no such NotebookRuntime in spanner. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListNotebookRuntimesRequest(proto.Message): + r"""Request message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimes]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the NotebookRuntimes. Format: + ``projects/{project}/locations/{location}`` + filter (str): + Optional. An expression for filtering the results of the + request. For field names both snake_case and camelCase are + supported. + + - ``notebookRuntime`` supports = and !=. + ``notebookRuntime`` represents the NotebookRuntime ID, + i.e. the last segment of the NotebookRuntime's [resource + name] + [google.cloud.aiplatform.v1beta1.NotebookRuntime.name]. + - ``displayName`` supports = and != and regex. + - ``notebookRuntimeTemplate`` supports = and !=. + ``notebookRuntimeTemplate`` represents the + NotebookRuntimeTemplate ID, i.e. the last segment of the + NotebookRuntimeTemplate's [resource name] + [google.cloud.aiplatform.v1beta1.NotebookRuntimeTemplate.name]. + - ``healthState`` supports = and !=. healthState enum: + [HEALTHY, UNHEALTHY, HEALTH_STATE_UNSPECIFIED]. + - ``runtimeState`` supports = and !=. 
runtimeState enum: + [RUNTIME_STATE_UNSPECIFIED, RUNNING, BEING_STARTED, + BEING_STOPPED, STOPPED, BEING_UPGRADED, ERROR, INVALID]. + - ``runtimeUser`` supports = and !=. + - API version is UI only: ``uiState`` supports = and !=. + uiState enum: [UI_RESOURCE_STATE_UNSPECIFIED, + UI_RESOURCE_STATE_BEING_CREATED, + UI_RESOURCE_STATE_ACTIVE, + UI_RESOURCE_STATE_BEING_DELETED, + UI_RESOURCE_STATE_CREATION_FAILED]. + - ``notebookRuntimeType`` supports = and !=. + notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. + + Some examples: + + - ``notebookRuntime="notebookRuntime123"`` + - ``displayName="myDisplayName"`` and + ``displayName=~"myDisplayNameRegex"`` + - ``notebookRuntimeTemplate="notebookRuntimeTemplate321"`` + - ``healthState=HEALTHY`` + - ``runtimeState=RUNNING`` + - ``runtimeUser="test@google.com"`` + - ``uiState=UI_RESOURCE_STATE_BEING_DELETED`` + - ``notebookRuntimeType=USER_DEFINED`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via + [ListNotebookRuntimesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListNotebookRuntimesResponse.next_page_token] + of the previous + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimes] + call. + read_mask (google.protobuf.field_mask_pb2.FieldMask): + Optional. Mask specifying which fields to + read. + order_by (str): + Optional. A comma-separated list of fields to order by, + sorted in ascending order. Use "desc" after a field name for + descending. Supported fields: + + - ``display_name`` + - ``create_time`` + - ``update_time`` + + Example: ``display_name, create_time desc``. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + filter: str = proto.Field( + proto.STRING, + number=2, + ) + page_size: int = proto.Field( + proto.INT32, + number=3, + ) + page_token: str = proto.Field( + proto.STRING, + number=4, + ) + read_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=5, + message=field_mask_pb2.FieldMask, + ) + order_by: str = proto.Field( + proto.STRING, + number=6, + ) + + +class ListNotebookRuntimesResponse(proto.Message): + r"""Response message for + [NotebookService.ListNotebookRuntimes][google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimes]. + + Attributes: + notebook_runtimes (MutableSequence[google.cloud.aiplatform_v1beta1.types.NotebookRuntime]): + List of NotebookRuntimes in the requested + page. + next_page_token (str): + A token to retrieve next page of results. Pass to + [ListNotebookRuntimesRequest.page_token][google.cloud.aiplatform.v1beta1.ListNotebookRuntimesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + notebook_runtimes: MutableSequence[ + gca_notebook_runtime.NotebookRuntime + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gca_notebook_runtime.NotebookRuntime, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteNotebookRuntimeRequest(proto.Message): + r"""Request message for + [NotebookService.DeleteNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.DeleteNotebookRuntime]. + + Attributes: + name (str): + Required. The name of the NotebookRuntime + resource to be deleted. Instead of checking + whether the name is in valid NotebookRuntime + resource name format, directly throw NotFound + exception if there is no such NotebookRuntime in + spanner. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UpgradeNotebookRuntimeRequest(proto.Message): + r"""Request message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.UpgradeNotebookRuntime]. + + Attributes: + name (str): + Required. The name of the NotebookRuntime + resource to be upgrade. Instead of checking + whether the name is in valid NotebookRuntime + resource name format, directly throw NotFound + exception if there is no such NotebookRuntime in + spanner. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UpgradeNotebookRuntimeOperationMetadata(proto.Message): + r"""Metadata information for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.UpgradeNotebookRuntime]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + progress_message (str): + A human-readable message that shows the + intermediate progress details of + NotebookRuntime. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + progress_message: str = proto.Field( + proto.STRING, + number=2, + ) + + +class UpgradeNotebookRuntimeResponse(proto.Message): + r"""Response message for + [NotebookService.UpgradeNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.UpgradeNotebookRuntime]. + + """ + + +class StartNotebookRuntimeRequest(proto.Message): + r"""Request message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.StartNotebookRuntime]. + + Attributes: + name (str): + Required. The name of the NotebookRuntime + resource to be started. Instead of checking + whether the name is in valid NotebookRuntime + resource name format, directly throw NotFound + exception if there is no such NotebookRuntime in + spanner. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class StartNotebookRuntimeOperationMetadata(proto.Message): + r"""Metadata information for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.StartNotebookRuntime]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + progress_message (str): + A human-readable message that shows the + intermediate progress details of + NotebookRuntime. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + progress_message: str = proto.Field( + proto.STRING, + number=2, + ) + + +class StartNotebookRuntimeResponse(proto.Message): + r"""Response message for + [NotebookService.StartNotebookRuntime][google.cloud.aiplatform.v1beta1.NotebookService.StartNotebookRuntime]. + + """ + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/openapi.py b/google/cloud/aiplatform_v1beta1/types/openapi.py index ff2c76f9d5..eac939d725 100644 --- a/google/cloud/aiplatform_v1beta1/types/openapi.py +++ b/google/cloud/aiplatform_v1beta1/types/openapi.py @@ -73,14 +73,25 @@ class Schema(proto.Message): Optional. The format of the data. Supported formats: - for NUMBER type: float, double - for INTEGER type: int32, int64 + for NUMBER type: "float", "double" + for INTEGER type: "int32", "int64" + for STRING type: "email", "byte", etc + title (str): + Optional. The title of the Schema. description (str): Optional. The description of the data. nullable (bool): Optional. Indicates if the value may be null. + default (google.protobuf.struct_pb2.Value): + Optional. Default value of the data. items (google.cloud.aiplatform_v1beta1.types.Schema): - Optional. Schema of the elements of + Optional. SCHEMA FIELDS FOR TYPE ARRAY + Schema of the elements of Type.ARRAY. 
+ min_items (int): + Optional. Minimum number of the elements for + Type.ARRAY. + max_items (int): + Optional. Maximum number of the elements for Type.ARRAY. enum (MutableSequence[str]): Optional. Possible values of the element of Type.STRING with @@ -88,9 +99,31 @@ class Schema(proto.Message): : {type:STRING, format:enum, enum:["EAST", NORTH", "SOUTH", "WEST"]} properties (MutableMapping[str, google.cloud.aiplatform_v1beta1.types.Schema]): - Optional. Properties of Type.OBJECT. + Optional. SCHEMA FIELDS FOR TYPE OBJECT + Properties of Type.OBJECT. required (MutableSequence[str]): Optional. Required properties of Type.OBJECT. + min_properties (int): + Optional. Minimum number of the properties + for Type.OBJECT. + max_properties (int): + Optional. Maximum number of the properties + for Type.OBJECT. + minimum (float): + Optional. SCHEMA FIELDS FOR TYPE INTEGER and + NUMBER Minimum value of the Type.INTEGER and + Type.NUMBER + maximum (float): + Optional. Maximum value of the Type.INTEGER + and Type.NUMBER + min_length (int): + Optional. SCHEMA FIELDS FOR TYPE STRING + Minimum length of the Type.STRING + max_length (int): + Optional. Maximum length of the Type.STRING + pattern (str): + Optional. Pattern of the Type.STRING to + restrict a string to a regular expression. example (google.protobuf.struct_pb2.Value): Optional. Example of the object. Will only populated when the object is the root. 
@@ -105,6 +138,10 @@ class Schema(proto.Message): proto.STRING, number=7, ) + title: str = proto.Field( + proto.STRING, + number=24, + ) description: str = proto.Field( proto.STRING, number=8, @@ -113,11 +150,24 @@ class Schema(proto.Message): proto.BOOL, number=6, ) + default: struct_pb2.Value = proto.Field( + proto.MESSAGE, + number=23, + message=struct_pb2.Value, + ) items: "Schema" = proto.Field( proto.MESSAGE, number=2, message="Schema", ) + min_items: int = proto.Field( + proto.INT64, + number=21, + ) + max_items: int = proto.Field( + proto.INT64, + number=22, + ) enum: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=9, @@ -132,6 +182,34 @@ class Schema(proto.Message): proto.STRING, number=5, ) + min_properties: int = proto.Field( + proto.INT64, + number=14, + ) + max_properties: int = proto.Field( + proto.INT64, + number=15, + ) + minimum: float = proto.Field( + proto.DOUBLE, + number=16, + ) + maximum: float = proto.Field( + proto.DOUBLE, + number=17, + ) + min_length: int = proto.Field( + proto.INT64, + number=18, + ) + max_length: int = proto.Field( + proto.INT64, + number=19, + ) + pattern: str = proto.Field( + proto.STRING, + number=20, + ) example: struct_pb2.Value = proto.Field( proto.MESSAGE, number=4, diff --git a/google/cloud/aiplatform_v1beta1/types/persistent_resource.py b/google/cloud/aiplatform_v1beta1/types/persistent_resource.py index 265b104804..a9c5a688fe 100644 --- a/google/cloud/aiplatform_v1beta1/types/persistent_resource.py +++ b/google/cloud/aiplatform_v1beta1/types/persistent_resource.py @@ -141,12 +141,22 @@ class State(proto.Enum): ERROR (5): The ERROR state indicates the persistent resource may be unusable. Details can be found in the ``error`` field. + REBOOTING (6): + The REBOOTING state indicates the persistent + resource is being rebooted (PR is not available + right now but is expected to be ready again + later). + UPDATING (7): + The UPDATING state indicates the persistent + resource is being updated. 
""" STATE_UNSPECIFIED = 0 PROVISIONING = 1 RUNNING = 3 STOPPING = 4 ERROR = 5 + REBOOTING = 6 + UPDATING = 7 name: str = proto.Field( proto.STRING, @@ -402,6 +412,15 @@ class ResourceRuntime(proto.Message): Output only. URIs for user to connect to the Cluster. Example: { "RAY_HEAD_NODE_INTERNAL_IP": "head-node-IP:10001" "RAY_DASHBOARD_URI": "ray-dashboard-address:8888" } + notebook_runtime_template (str): + Output only. The resource name of + NotebookRuntimeTemplate for the RoV Persistent + Cluster The NotebokRuntimeTemplate is created in + the same VPC (if set), and with the same Ray and + Python version as the Persistent Cluster. + Example: + + "projects/1000/locations/us-central1/notebookRuntimeTemplates/abc123". """ access_uris: MutableMapping[str, str] = proto.MapField( @@ -409,6 +428,10 @@ class ResourceRuntime(proto.Message): proto.STRING, number=1, ) + notebook_runtime_template: str = proto.Field( + proto.STRING, + number=2, + ) class ServiceAccountSpec(proto.Message): @@ -423,21 +446,20 @@ class ServiceAccountSpec(proto.Message): Service Agent `__. service_account (str): - Optional. Default service account that this - PersistentResource's workloads run as. The workloads - include: + Optional. Required when all below conditions are met - - Any runtime specified via ``ResourceRuntimeSpec`` on - creation time, for example, Ray. - - Jobs submitted to PersistentResource, if no other service - account specified in the job specs. + - ``enable_custom_service_account`` is true; + - any runtime is specified via ``ResourceRuntimeSpec`` on + creation time, for example, Ray - Only works when custom service account is enabled and users - have the ``iam.serviceAccounts.actAs`` permission on this - service account. + The users must have ``iam.serviceAccounts.actAs`` permission + on this service account and then the specified runtime + containers will run as it. - Required if any containers are specified in - ``ResourceRuntimeSpec``. 
+ Do not set this field if you want to submit jobs using + custom service account to this PersistentResource after + creation, but only specify the ``service_account`` inside + the job. """ enable_custom_service_account: bool = proto.Field( diff --git a/google/cloud/aiplatform_v1beta1/types/persistent_resource_service.py b/google/cloud/aiplatform_v1beta1/types/persistent_resource_service.py index 349cb31e11..8cb65885ae 100644 --- a/google/cloud/aiplatform_v1beta1/types/persistent_resource_service.py +++ b/google/cloud/aiplatform_v1beta1/types/persistent_resource_service.py @@ -32,11 +32,13 @@ "CreatePersistentResourceRequest", "CreatePersistentResourceOperationMetadata", "UpdatePersistentResourceOperationMetadata", + "RebootPersistentResourceOperationMetadata", "GetPersistentResourceRequest", "ListPersistentResourcesRequest", "ListPersistentResourcesResponse", "DeletePersistentResourceRequest", "UpdatePersistentResourceRequest", + "RebootPersistentResourceRequest", }, ) @@ -118,6 +120,27 @@ class UpdatePersistentResourceOperationMetadata(proto.Message): ) +class RebootPersistentResourceOperationMetadata(proto.Message): + r"""Details of operations that perform reboot PersistentResource. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + Operation metadata for PersistentResource. + progress_message (str): + Progress Message for Reboot LRO + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + progress_message: str = proto.Field( + proto.STRING, + number=2, + ) + + class GetPersistentResourceRequest(proto.Message): r"""Request message for [PersistentResourceService.GetPersistentResource][google.cloud.aiplatform.v1beta1.PersistentResourceService.GetPersistentResource]. 
@@ -242,4 +265,21 @@ class UpdatePersistentResourceRequest(proto.Message): ) +class RebootPersistentResourceRequest(proto.Message): + r"""Request message for + [PersistentResourceService.RebootPersistentResource][google.cloud.aiplatform.v1beta1.PersistentResourceService.RebootPersistentResource]. + + Attributes: + name (str): + Required. The name of the PersistentResource resource. + Format: + ``projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/prediction_service.py b/google/cloud/aiplatform_v1beta1/types/prediction_service.py index 64f67b231b..464fd65386 100644 --- a/google/cloud/aiplatform_v1beta1/types/prediction_service.py +++ b/google/cloud/aiplatform_v1beta1/types/prediction_service.py @@ -51,6 +51,7 @@ "CountTokensResponse", "GenerateContentRequest", "GenerateContentResponse", + "ChatCompletionsRequest", }, ) @@ -766,6 +767,8 @@ class CountTokensResponse(proto.Message): class GenerateContentRequest(proto.Message): r"""Request message for [PredictionService.GenerateContent]. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: model (str): Required. The name of the publisher model requested to serve @@ -778,6 +781,13 @@ class GenerateContentRequest(proto.Message): instance. For multi-turn queries, this is a repeated field that contains conversation history + latest request. + system_instruction (google.cloud.aiplatform_v1beta1.types.Content): + Optional. The user provided system + instructions for the model. Note: only text + should be used in parts and content in each part + will be in a separate paragraph. + + This field is a member of `oneof`_ ``_system_instruction``. tools (MutableSequence[google.cloud.aiplatform_v1beta1.types.Tool]): Optional. 
A list of ``Tools`` the model may use to generate the next response. @@ -786,8 +796,8 @@ class GenerateContentRequest(proto.Message): interact with external systems to perform an action, or set of actions, outside of knowledge and scope of the model. tool_config (google.cloud.aiplatform_v1beta1.types.ToolConfig): - Tool config. This config is shared for all - tools provided in the request. + Optional. Tool config. This config is shared + for all tools provided in the request. safety_settings (MutableSequence[google.cloud.aiplatform_v1beta1.types.SafetySetting]): Optional. Per request settings for blocking unsafe content. Enforced on @@ -805,6 +815,12 @@ class GenerateContentRequest(proto.Message): number=2, message=content.Content, ) + system_instruction: content.Content = proto.Field( + proto.MESSAGE, + number=8, + optional=True, + message=content.Content, + ) tools: MutableSequence[tool.Tool] = proto.RepeatedField( proto.MESSAGE, number=6, @@ -865,10 +881,17 @@ class BlockedReason(proto.Enum): Candidates blocked due to safety. OTHER (2): Candidates blocked due to other reason. + BLOCKLIST (3): + Candidates blocked due to the terms which are + included from the terminology blocklist. + PROHIBITED_CONTENT (4): + Candidates blocked due to prohibited content. """ BLOCKED_REASON_UNSPECIFIED = 0 SAFETY = 1 OTHER = 2 + BLOCKLIST = 3 + PROHIBITED_CONTENT = 4 block_reason: "GenerateContentResponse.PromptFeedback.BlockedReason" = ( proto.Field( @@ -929,4 +952,28 @@ class UsageMetadata(proto.Message): ) +class ChatCompletionsRequest(proto.Message): + r"""Request message for [PredictionService.ChatCompletions] + + Attributes: + endpoint (str): + Required. The name of the Endpoint requested to serve the + prediction. Format: + ``projects/{project}/locations/{location}/endpoints/openapi`` + http_body (google.api.httpbody_pb2.HttpBody): + Optional. The prediction input. Supports HTTP + headers and arbitrary data payload. 
+ """ + + endpoint: str = proto.Field( + proto.STRING, + number=1, + ) + http_body: httpbody_pb2.HttpBody = proto.Field( + proto.MESSAGE, + number=2, + message=httpbody_pb2.HttpBody, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/tool.py b/google/cloud/aiplatform_v1beta1/types/tool.py index 721bc719bd..26b5498661 100644 --- a/google/cloud/aiplatform_v1beta1/types/tool.py +++ b/google/cloud/aiplatform_v1beta1/types/tool.py @@ -27,10 +27,12 @@ package="google.cloud.aiplatform.v1beta1", manifest={ "Tool", + "ToolUseExample", "FunctionDeclaration", "FunctionCall", "FunctionResponse", "Retrieval", + "VertexRagStore", "VertexAISearch", "GoogleSearchRetrieval", "ToolConfig", @@ -89,6 +91,94 @@ class Tool(proto.Message): ) +class ToolUseExample(proto.Message): + r"""A single example of the tool usage. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + extension_operation (google.cloud.aiplatform_v1beta1.types.ToolUseExample.ExtensionOperation): + Extension operation to call. + + This field is a member of `oneof`_ ``Target``. + function_name (str): + Function name to call. + + This field is a member of `oneof`_ ``Target``. + display_name (str): + Required. The display name for example. + query (str): + Required. Query that should be routed to this + tool. + request_params (google.protobuf.struct_pb2.Struct): + Request parameters used for executing this + tool. + response_params (google.protobuf.struct_pb2.Struct): + Response parameters generated by this tool. + response_summary (str): + Summary of the tool response to the user + query. 
+ """ + + class ExtensionOperation(proto.Message): + r"""Identifies one operation of the extension. + + Attributes: + extension (str): + Resource name of the extension. + operation_id (str): + Required. Operation ID of the extension. + """ + + extension: str = proto.Field( + proto.STRING, + number=1, + ) + operation_id: str = proto.Field( + proto.STRING, + number=2, + ) + + extension_operation: ExtensionOperation = proto.Field( + proto.MESSAGE, + number=10, + oneof="Target", + message=ExtensionOperation, + ) + function_name: str = proto.Field( + proto.STRING, + number=11, + oneof="Target", + ) + display_name: str = proto.Field( + proto.STRING, + number=1, + ) + query: str = proto.Field( + proto.STRING, + number=2, + ) + request_params: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=3, + message=struct_pb2.Struct, + ) + response_params: struct_pb2.Struct = proto.Field( + proto.MESSAGE, + number=4, + message=struct_pb2.Struct, + ) + response_summary: str = proto.Field( + proto.STRING, + number=5, + ) + + class FunctionDeclaration(proto.Message): r"""Structured representation of a function declaration as defined by the `OpenAPI 3.0 @@ -101,8 +191,8 @@ class FunctionDeclaration(proto.Message): name (str): Required. The name of the function to call. Must start with a letter or an underscore. - Must be a-z, A-Z, 0-9, or contain underscores - and dashes, with a maximum length of 64. + Must be a-z, A-Z, 0-9, or contain underscores, + dots and dashes, with a maximum length of 64. description (str): Optional. Description and purpose of the function. Model uses it to decide how and @@ -115,8 +205,11 @@ class FunctionDeclaration(proto.Message): case sensitive. Schema Value: the Schema defining the type used for the parameter. For function with no parameters, this can be left - unset. Example with 1 required and 1 optional - parameter: type: OBJECT properties: + unset. 
Parameter names must start with a letter + or an underscore and must only contain chars + a-z, A-Z, 0-9, or underscores with a maximum + length of 64. Example with 1 required and 1 + optional parameter: type: OBJECT properties: param1: @@ -127,6 +220,12 @@ class FunctionDeclaration(proto.Message): required: - param1 + response (google.cloud.aiplatform_v1beta1.types.Schema): + Optional. Describes the output from this + function in JSON Schema format. Reflects the + Open API 3.03 Response Object. The Schema + defines the type used for the response value of + the function. """ name: str = proto.Field( @@ -142,6 +241,11 @@ class FunctionDeclaration(proto.Message): number=3, message=openapi.Schema, ) + response: openapi.Schema = proto.Field( + proto.MESSAGE, + number=4, + message=openapi.Schema, + ) class FunctionCall(proto.Message): @@ -201,6 +305,10 @@ class Retrieval(proto.Message): r"""Defines a retrieval tool that model can call to access external knowledge. + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields @@ -209,6 +317,12 @@ class Retrieval(proto.Message): Set to use data source powered by Vertex AI Search. + This field is a member of `oneof`_ ``source``. + vertex_rag_store (google.cloud.aiplatform_v1beta1.types.VertexRagStore): + Set to use data source powered by Vertex RAG + store. User data is uploaded via the + VertexRagDataService. + This field is a member of `oneof`_ ``source``. disable_attribution (bool): Optional. 
Disable using the result from this @@ -223,21 +337,57 @@ class Retrieval(proto.Message): oneof="source", message="VertexAISearch", ) + vertex_rag_store: "VertexRagStore" = proto.Field( + proto.MESSAGE, + number=4, + oneof="source", + message="VertexRagStore", + ) disable_attribution: bool = proto.Field( proto.BOOL, number=3, ) +class VertexRagStore(proto.Message): + r"""Retrieve from Vertex RAG Store for grounding. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + rag_corpora (MutableSequence[str]): + Required. Vertex RAG Store corpus resource name: + ``projects/{project}/locations/{location}/ragCorpora/{ragCorpus}`` + Currently only one corpus is allowed. In the future we may + open up multiple corpora support. However, they should be + from the same project and location. + similarity_top_k (int): + Optional. Number of top k results to return + from the selected corpora. + + This field is a member of `oneof`_ ``_similarity_top_k``. + """ + + rag_corpora: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + similarity_top_k: int = proto.Field( + proto.INT32, + number=2, + optional=True, + ) + + class VertexAISearch(proto.Message): r"""Retrieve from Vertex AI Search datastore for grounding. See https://cloud.google.com/vertex-ai-search-and-conversation Attributes: datastore (str): - Required. Fully-qualified Vertex AI Search's - datastore resource ID. - projects/<>/locations/<>/collections/<>/dataStores/<> + Required. Fully-qualified Vertex AI Search's datastore + resource ID. Format: + ``projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}`` """ datastore: str = proto.Field( @@ -270,7 +420,7 @@ class ToolConfig(proto.Message): Attributes: function_calling_config (google.cloud.aiplatform_v1beta1.types.FunctionCallingConfig): - Function calling config. + Optional. Function calling config. 
""" function_calling_config: "FunctionCallingConfig" = proto.Field( @@ -285,12 +435,12 @@ class FunctionCallingConfig(proto.Message): Attributes: mode (google.cloud.aiplatform_v1beta1.types.FunctionCallingConfig.Mode): - Function calling mode. + Optional. Function calling mode. allowed_function_names (MutableSequence[str]): - Function names to call. Only set when the Mode is ANY. - Function names should match [FunctionDeclaration.name]. With - mode set to ANY, model will predict a function call from the - set of function names provided. + Optional. Function names to call. Only set when the Mode is + ANY. Function names should match [FunctionDeclaration.name]. + With mode set to ANY, model will predict a function call + from the set of function names provided. """ class Mode(proto.Enum): diff --git a/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py new file mode 100644 index 0000000000..d0947308c2 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data.py @@ -0,0 +1,287 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import io +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "RagCorpus", + "RagFile", + "RagFileChunkingConfig", + "UploadRagFileConfig", + "ImportRagFilesConfig", + }, +) + + +class RagCorpus(proto.Message): + r"""A RagCorpus is a RagFile container and a project can have + multiple RagCorpora. + + Attributes: + name (str): + Output only. The resource name of the + RagCorpus. + display_name (str): + Required. The display name of the RagCorpus. + The name can be up to 128 characters long and + can consist of any UTF-8 characters. + description (str): + Optional. The description of the RagCorpus. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this RagCorpus + was created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this RagCorpus + was last updated. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + description: str = proto.Field( + proto.STRING, + number=3, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + + +class RagFile(proto.Message): + r"""A RagFile contains user data for chunking, embedding and + indexing. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. 
_oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + Output only. Google Cloud Storage location of + the RagFile. It does not support wildcards in + the GCS uri for now. + + This field is a member of `oneof`_ ``rag_file_source``. + google_drive_source (google.cloud.aiplatform_v1beta1.types.GoogleDriveSource): + Output only. Google Drive location. Supports + importing individual files as well as Google + Drive folders. + + This field is a member of `oneof`_ ``rag_file_source``. + direct_upload_source (google.cloud.aiplatform_v1beta1.types.DirectUploadSource): + Output only. The RagFile is encapsulated and + uploaded in the UploadRagFile request. + + This field is a member of `oneof`_ ``rag_file_source``. + name (str): + Output only. The resource name of the + RagFile. + display_name (str): + Required. The display name of the RagFile. + The name can be up to 128 characters long and + can consist of any UTF-8 characters. + description (str): + Optional. The description of the RagFile. + size_bytes (int): + Output only. The size of the RagFile in + bytes. + rag_file_type (google.cloud.aiplatform_v1beta1.types.RagFile.RagFileType): + Output only. The type of the RagFile. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this RagFile was + created. + update_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp when this RagFile was + last updated. + """ + + class RagFileType(proto.Enum): + r"""The type of the RagFile. + + Values: + RAG_FILE_TYPE_UNSPECIFIED (0): + RagFile type is unspecified. + RAG_FILE_TYPE_TXT (1): + RagFile type is TXT. + RAG_FILE_TYPE_PDF (2): + RagFile type is PDF. 
+ """ + RAG_FILE_TYPE_UNSPECIFIED = 0 + RAG_FILE_TYPE_TXT = 1 + RAG_FILE_TYPE_PDF = 2 + + gcs_source: io.GcsSource = proto.Field( + proto.MESSAGE, + number=8, + oneof="rag_file_source", + message=io.GcsSource, + ) + google_drive_source: io.GoogleDriveSource = proto.Field( + proto.MESSAGE, + number=9, + oneof="rag_file_source", + message=io.GoogleDriveSource, + ) + direct_upload_source: io.DirectUploadSource = proto.Field( + proto.MESSAGE, + number=10, + oneof="rag_file_source", + message=io.DirectUploadSource, + ) + name: str = proto.Field( + proto.STRING, + number=1, + ) + display_name: str = proto.Field( + proto.STRING, + number=2, + ) + description: str = proto.Field( + proto.STRING, + number=3, + ) + size_bytes: int = proto.Field( + proto.INT64, + number=4, + ) + rag_file_type: RagFileType = proto.Field( + proto.ENUM, + number=5, + enum=RagFileType, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=6, + message=timestamp_pb2.Timestamp, + ) + update_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=7, + message=timestamp_pb2.Timestamp, + ) + + +class RagFileChunkingConfig(proto.Message): + r"""Specifies the size and overlap of chunks for RagFiles. + + Attributes: + chunk_size (int): + The size of the chunks. + chunk_overlap (int): + The overlap between chunks. + """ + + chunk_size: int = proto.Field( + proto.INT32, + number=1, + ) + chunk_overlap: int = proto.Field( + proto.INT32, + number=2, + ) + + +class UploadRagFileConfig(proto.Message): + r"""Config for uploading RagFile. + + Attributes: + rag_file_chunking_config (google.cloud.aiplatform_v1beta1.types.RagFileChunkingConfig): + Specifies the size and overlap of chunks + after uploading RagFile. + """ + + rag_file_chunking_config: "RagFileChunkingConfig" = proto.Field( + proto.MESSAGE, + number=1, + message="RagFileChunkingConfig", + ) + + +class ImportRagFilesConfig(proto.Message): + r"""Config for importing RagFiles. 
+ + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource): + Google Cloud Storage location. Supports importing individual + files as well as entire Google Cloud Storage directories. + Sample formats: + + - ``gs://bucket_name/my_directory/object_name/my_file.txt`` + - ``gs://bucket_name/my_directory`` + + This field is a member of `oneof`_ ``import_source``. + google_drive_source (google.cloud.aiplatform_v1beta1.types.GoogleDriveSource): + Google Drive location. Supports importing + individual files as well as Google Drive + folders. + + This field is a member of `oneof`_ ``import_source``. + rag_file_chunking_config (google.cloud.aiplatform_v1beta1.types.RagFileChunkingConfig): + Specifies the size and overlap of chunks + after importing RagFiles. 
+ """ + + gcs_source: io.GcsSource = proto.Field( + proto.MESSAGE, + number=2, + oneof="import_source", + message=io.GcsSource, + ) + google_drive_source: io.GoogleDriveSource = proto.Field( + proto.MESSAGE, + number=3, + oneof="import_source", + message=io.GoogleDriveSource, + ) + rag_file_chunking_config: "RagFileChunkingConfig" = proto.Field( + proto.MESSAGE, + number=4, + message="RagFileChunkingConfig", + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/vertex_rag_data_service.py b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data_service.py new file mode 100644 index 0000000000..2857fab341 --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/vertex_rag_data_service.py @@ -0,0 +1,424 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.aiplatform_v1beta1.types import operation +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "CreateRagCorpusRequest", + "GetRagCorpusRequest", + "ListRagCorporaRequest", + "ListRagCorporaResponse", + "DeleteRagCorpusRequest", + "UploadRagFileRequest", + "UploadRagFileResponse", + "ImportRagFilesRequest", + "ImportRagFilesResponse", + "GetRagFileRequest", + "ListRagFilesRequest", + "ListRagFilesResponse", + "DeleteRagFileRequest", + "CreateRagCorpusOperationMetadata", + "ImportRagFilesOperationMetadata", + }, +) + + +class CreateRagCorpusRequest(proto.Message): + r"""Request message for + [VertexRagDataService.CreateRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.CreateRagCorpus]. + + Attributes: + parent (str): + Required. The resource name of the Location to create the + RagCorpus in. Format: + ``projects/{project}/locations/{location}`` + rag_corpus (google.cloud.aiplatform_v1beta1.types.RagCorpus): + Required. The RagCorpus to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + rag_corpus: vertex_rag_data.RagCorpus = proto.Field( + proto.MESSAGE, + number=2, + message=vertex_rag_data.RagCorpus, + ) + + +class GetRagCorpusRequest(proto.Message): + r"""Request message for + [VertexRagDataService.GetRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.GetRagCorpus] + + Attributes: + name (str): + Required. The name of the RagCorpus resource. 
Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListRagCorporaRequest(proto.Message): + r"""Request message for + [VertexRagDataService.ListRagCorpora][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagCorpora]. + + Attributes: + parent (str): + Required. The resource name of the Location from which to + list the RagCorpora. Format: + ``projects/{project}/locations/{location}`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via + [ListRagCorporaResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListRagCorporaResponse.next_page_token] + of the previous + [VertexRagDataService.ListRagCorpora][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagCorpora] + call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListRagCorporaResponse(proto.Message): + r"""Response message for + [VertexRagDataService.ListRagCorpora][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagCorpora]. + + Attributes: + rag_corpora (MutableSequence[google.cloud.aiplatform_v1beta1.types.RagCorpus]): + List of RagCorpora in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListRagCorporaRequest.page_token][google.cloud.aiplatform.v1beta1.ListRagCorporaRequest.page_token] + to obtain that page. 
+ """ + + @property + def raw_page(self): + return self + + rag_corpora: MutableSequence[vertex_rag_data.RagCorpus] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=vertex_rag_data.RagCorpus, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteRagCorpusRequest(proto.Message): + r"""Request message for + [VertexRagDataService.DeleteRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.DeleteRagCorpus]. + + Attributes: + name (str): + Required. The name of the RagCorpus resource to be deleted. + Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + force (bool): + Optional. If set to true, any RagFiles in + this RagCorpus will also be deleted. Otherwise, + the request will only work if the RagCorpus has + no RagFiles. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + force: bool = proto.Field( + proto.BOOL, + number=2, + ) + + +class UploadRagFileRequest(proto.Message): + r"""Request message for + [VertexRagDataService.UploadRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile]. + + Attributes: + parent (str): + Required. The name of the RagCorpus resource into which to + upload the file. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + rag_file (google.cloud.aiplatform_v1beta1.types.RagFile): + Required. The RagFile to upload. + upload_rag_file_config (google.cloud.aiplatform_v1beta1.types.UploadRagFileConfig): + Required. The config for the RagFiles to be uploaded into + the RagCorpus. + [VertexRagDataService.UploadRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile]. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + rag_file: vertex_rag_data.RagFile = proto.Field( + proto.MESSAGE, + number=2, + message=vertex_rag_data.RagFile, + ) + upload_rag_file_config: vertex_rag_data.UploadRagFileConfig = proto.Field( + proto.MESSAGE, + number=5, + message=vertex_rag_data.UploadRagFileConfig, + ) + + +class UploadRagFileResponse(proto.Message): + r"""Response message for + [VertexRagDataService.UploadRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile]. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + rag_file (google.cloud.aiplatform_v1beta1.types.RagFile): + The RagFile that had been uploaded into the + RagCorpus. + + This field is a member of `oneof`_ ``result``. + error (google.rpc.status_pb2.Status): + The error that occurred while processing the + RagFile. + + This field is a member of `oneof`_ ``result``. + """ + + rag_file: vertex_rag_data.RagFile = proto.Field( + proto.MESSAGE, + number=1, + oneof="result", + message=vertex_rag_data.RagFile, + ) + error: status_pb2.Status = proto.Field( + proto.MESSAGE, + number=4, + oneof="result", + message=status_pb2.Status, + ) + + +class ImportRagFilesRequest(proto.Message): + r"""Request message for + [VertexRagDataService.ImportRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles]. + + Attributes: + parent (str): + Required. The name of the RagCorpus resource into which to + import files. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + import_rag_files_config (google.cloud.aiplatform_v1beta1.types.ImportRagFilesConfig): + Required. 
The config for the RagFiles to be synced and + imported into the RagCorpus. + [VertexRagDataService.ImportRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles]. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + import_rag_files_config: vertex_rag_data.ImportRagFilesConfig = proto.Field( + proto.MESSAGE, + number=2, + message=vertex_rag_data.ImportRagFilesConfig, + ) + + +class ImportRagFilesResponse(proto.Message): + r"""Response message for + [VertexRagDataService.ImportRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles]. + + Attributes: + imported_rag_files_count (int): + The number of RagFiles that had been imported + into the RagCorpus. + """ + + imported_rag_files_count: int = proto.Field( + proto.INT64, + number=1, + ) + + +class GetRagFileRequest(proto.Message): + r"""Request message for + [VertexRagDataService.GetRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.GetRagFile] + + Attributes: + name (str): + Required. The name of the RagFile resource. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListRagFilesRequest(proto.Message): + r"""Request message for + [VertexRagDataService.ListRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagFiles]. + + Attributes: + parent (str): + Required. The resource name of the RagCorpus from which to + list the RagFiles. Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + page_size (int): + Optional. The standard list page size. + page_token (str): + Optional. The standard list page token. Typically obtained + via + [ListRagFilesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListRagFilesResponse.next_page_token] + of the previous + [VertexRagDataService.ListRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagFiles] + call. 
+ """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListRagFilesResponse(proto.Message): + r"""Response message for + [VertexRagDataService.ListRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagFiles]. + + Attributes: + rag_files (MutableSequence[google.cloud.aiplatform_v1beta1.types.RagFile]): + List of RagFiles in the requested page. + next_page_token (str): + A token to retrieve the next page of results. Pass to + [ListRagFilesRequest.page_token][google.cloud.aiplatform.v1beta1.ListRagFilesRequest.page_token] + to obtain that page. + """ + + @property + def raw_page(self): + return self + + rag_files: MutableSequence[vertex_rag_data.RagFile] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=vertex_rag_data.RagFile, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteRagFileRequest(proto.Message): + r"""Request message for + [VertexRagDataService.DeleteRagFile][google.cloud.aiplatform.v1beta1.VertexRagDataService.DeleteRagFile]. + + Attributes: + name (str): + Required. The name of the RagFile resource to be deleted. + Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateRagCorpusOperationMetadata(proto.Message): + r"""Runtime operation information for + [VertexRagDataService.CreateRagCorpus][google.cloud.aiplatform.v1beta1.VertexRagDataService.CreateRagCorpus]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. 
+ """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + + +class ImportRagFilesOperationMetadata(proto.Message): + r"""Runtime operation information for + [VertexRagDataService.ImportRagFiles][google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles]. + + Attributes: + generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata): + The operation generic information. + rag_corpus_id (int): + The resource ID of RagCorpus that this + operation is executed on. + """ + + generic_metadata: operation.GenericOperationMetadata = proto.Field( + proto.MESSAGE, + number=1, + message=operation.GenericOperationMetadata, + ) + rag_corpus_id: int = proto.Field( + proto.INT64, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/aiplatform_v1beta1/types/vertex_rag_service.py b/google/cloud/aiplatform_v1beta1/types/vertex_rag_service.py new file mode 100644 index 0000000000..6ea53b31dd --- /dev/null +++ b/google/cloud/aiplatform_v1beta1/types/vertex_rag_service.py @@ -0,0 +1,175 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.aiplatform.v1beta1", + manifest={ + "RagQuery", + "RetrieveContextsRequest", + "RagContexts", + "RetrieveContextsResponse", + }, +) + + +class RagQuery(proto.Message): + r"""A query to retrieve relevant contexts. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + text (str): + Optional. The query in text format to get + relevant contexts. + + This field is a member of `oneof`_ ``query``. + similarity_top_k (int): + Optional. The number of contexts to retrieve. + """ + + text: str = proto.Field( + proto.STRING, + number=1, + oneof="query", + ) + similarity_top_k: int = proto.Field( + proto.INT32, + number=2, + ) + + +class RetrieveContextsRequest(proto.Message): + r"""Request message for + [VertexRagService.RetrieveContexts][google.cloud.aiplatform.v1beta1.VertexRagService.RetrieveContexts]. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + vertex_rag_store (google.cloud.aiplatform_v1beta1.types.RetrieveContextsRequest.VertexRagStore): + The data source for Vertex RagStore. + + This field is a member of `oneof`_ ``data_source``. + parent (str): + Required. The resource name of the Location from which to + retrieve RagContexts. The users must have permission to make + a call in the project. Format: + ``projects/{project}/locations/{location}``. + query (google.cloud.aiplatform_v1beta1.types.RagQuery): + Required. Single RAG retrieve query. + """ + + class VertexRagStore(proto.Message): + r"""The data source for Vertex RagStore. + + Attributes: + rag_corpora (MutableSequence[str]): + Required. RagCorpora resource name. 
Format: + ``projects/{project}/locations/{location}/ragCorpora/{rag_corpus}`` + Currently only one corpus is allowed. In the future we may + open up multiple corpora support. However, they should be + from the same project and location. + """ + + rag_corpora: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=1, + ) + + vertex_rag_store: VertexRagStore = proto.Field( + proto.MESSAGE, + number=2, + oneof="data_source", + message=VertexRagStore, + ) + parent: str = proto.Field( + proto.STRING, + number=1, + ) + query: "RagQuery" = proto.Field( + proto.MESSAGE, + number=3, + message="RagQuery", + ) + + +class RagContexts(proto.Message): + r"""Relevant contexts for one query. + + Attributes: + contexts (MutableSequence[google.cloud.aiplatform_v1beta1.types.RagContexts.Context]): + All its contexts. + """ + + class Context(proto.Message): + r"""A context of the query. + + Attributes: + source_uri (str): + For vertex RagStore, if the file is imported from Cloud + Storage or Google Drive, source_uri will be original file + URI in Cloud Storage or Google Drive; if file is uploaded, + source_uri will be file display name. + text (str): + The text chunk. + distance (float): + The distance between the query vector and the + context text vector. + """ + + source_uri: str = proto.Field( + proto.STRING, + number=1, + ) + text: str = proto.Field( + proto.STRING, + number=2, + ) + distance: float = proto.Field( + proto.DOUBLE, + number=3, + ) + + contexts: MutableSequence[Context] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=Context, + ) + + +class RetrieveContextsResponse(proto.Message): + r"""Response message for + [VertexRagService.RetrieveContexts][google.cloud.aiplatform.v1beta1.VertexRagService.RetrieveContexts]. + + Attributes: + contexts (google.cloud.aiplatform_v1beta1.types.RagContexts): + The contexts of the query. 
+ """ + + contexts: "RagContexts" = proto.Field( + proto.MESSAGE, + number=1, + message="RagContexts", + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/noxfile.py b/noxfile.py index 482ad98633..2f36f2c5dc 100644 --- a/noxfile.py +++ b/noxfile.py @@ -180,6 +180,7 @@ def default(session): "--cov-config=.coveragerc", "--cov-report=", "--cov-fail-under=0", + "--ignore=tests/unit/vertex_ray", os.path.join("tests", "unit"), *session.posargs, ) diff --git a/pypi/_vertex_ai_placeholder/pyproject.toml b/pypi/_vertex_ai_placeholder/pyproject.toml index 027a8e35f4..090733dfa6 100644 --- a/pypi/_vertex_ai_placeholder/pyproject.toml +++ b/pypi/_vertex_ai_placeholder/pyproject.toml @@ -4,12 +4,12 @@ build-backend = "setuptools.build_meta" [project] name = "vertexai" - +dynamic = ["version", "dependencies", "optional-dependencies"] authors = [ { name="Google LLC", email="googleapis-packages@google.com" }, ] license = {text = "Apache 2.0"} -description = "Please run pip install google-cloud-aiplatform to use the Vertex SDK." +description = "Please run pip install vertexai to use the Vertex SDK." readme = "README.md" requires-python = ">=3.8" diff --git a/pypi/_vertex_ai_placeholder/version.py b/pypi/_vertex_ai_placeholder/version.py index 1cbedfc2cb..22d7d2af99 100644 --- a/pypi/_vertex_ai_placeholder/version.py +++ b/pypi/_vertex_ai_placeholder/version.py @@ -15,4 +15,4 @@ # limitations under the License. 
# -__version__ = "1.46.0" +__version__ = "1.47.0" diff --git a/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_cancel_tuning_job_async.py b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_cancel_tuning_job_async.py new file mode 100644 index 0000000000..6b99e631e0 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_cancel_tuning_job_async.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_GenAiTuningService_CancelTuningJob_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_cancel_tuning_job(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelTuningJobRequest( + name="name_value", + ) + + # Make the request + await client.cancel_tuning_job(request=request) + + +# [END aiplatform_v1_generated_GenAiTuningService_CancelTuningJob_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_cancel_tuning_job_sync.py b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_cancel_tuning_job_sync.py new file mode 100644 index 0000000000..d2223a17b0 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_cancel_tuning_job_sync.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CancelTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_GenAiTuningService_CancelTuningJob_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_cancel_tuning_job(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CancelTuningJobRequest( + name="name_value", + ) + + # Make the request + client.cancel_tuning_job(request=request) + + +# [END aiplatform_v1_generated_GenAiTuningService_CancelTuningJob_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_create_tuning_job_async.py b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_create_tuning_job_async.py new file mode 100644 index 0000000000..fd403ebcd0 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_create_tuning_job_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_GenAiTuningService_CreateTuningJob_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_create_tuning_job(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceAsyncClient() + + # Initialize request argument(s) + tuning_job = aiplatform_v1.TuningJob() + tuning_job.base_model = "base_model_value" + tuning_job.supervised_tuning_spec.training_dataset_uri = "training_dataset_uri_value" + + request = aiplatform_v1.CreateTuningJobRequest( + parent="parent_value", + tuning_job=tuning_job, + ) + + # Make the request + response = await client.create_tuning_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_GenAiTuningService_CreateTuningJob_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_create_tuning_job_sync.py b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_create_tuning_job_sync.py new file mode 100644 index 0000000000..cf9bf178fc --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_create_tuning_job_sync.py @@ -0,0 +1,57 @@ 
+# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_GenAiTuningService_CreateTuningJob_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_create_tuning_job(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceClient() + + # Initialize request argument(s) + tuning_job = aiplatform_v1.TuningJob() + tuning_job.base_model = "base_model_value" + tuning_job.supervised_tuning_spec.training_dataset_uri = "training_dataset_uri_value" + + request = aiplatform_v1.CreateTuningJobRequest( + parent="parent_value", + tuning_job=tuning_job, + ) + + # Make the request + response = client.create_tuning_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_GenAiTuningService_CreateTuningJob_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_get_tuning_job_async.py b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_get_tuning_job_async.py new file mode 100644 index 0000000000..65f99a4310 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_get_tuning_job_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_GenAiTuningService_GetTuningJob_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_get_tuning_job(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTuningJobRequest( + name="name_value", + ) + + # Make the request + response = await client.get_tuning_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_GenAiTuningService_GetTuningJob_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_get_tuning_job_sync.py b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_get_tuning_job_sync.py new file mode 100644 index 0000000000..bc996143de --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_get_tuning_job_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetTuningJob +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_GenAiTuningService_GetTuningJob_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_get_tuning_job(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetTuningJobRequest( + name="name_value", + ) + + # Make the request + response = client.get_tuning_job(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_GenAiTuningService_GetTuningJob_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_list_tuning_jobs_async.py b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_list_tuning_jobs_async.py new file mode 100644 index 0000000000..509a262984 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_list_tuning_jobs_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTuningJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_GenAiTuningService_ListTuningJobs_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_list_tuning_jobs(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTuningJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tuning_jobs(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_GenAiTuningService_ListTuningJobs_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_list_tuning_jobs_sync.py b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_list_tuning_jobs_sync.py new file mode 100644 index 0000000000..b0917ca402 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_gen_ai_tuning_service_list_tuning_jobs_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListTuningJobs +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_GenAiTuningService_ListTuningJobs_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_list_tuning_jobs(): + # Create a client + client = aiplatform_v1.GenAiTuningServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListTuningJobsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_tuning_jobs(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_GenAiTuningService_ListTuningJobs_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_assign_notebook_runtime_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_assign_notebook_runtime_async.py new file mode 100644 index 0000000000..d2a3c88303 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_assign_notebook_runtime_async.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AssignNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_AssignNotebookRuntime_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_assign_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + notebook_runtime = aiplatform_v1.NotebookRuntime() + notebook_runtime.runtime_user = "runtime_user_value" + notebook_runtime.display_name = "display_name_value" + + request = aiplatform_v1.AssignNotebookRuntimeRequest( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=notebook_runtime, + ) + + # Make the request + operation = client.assign_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_AssignNotebookRuntime_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_assign_notebook_runtime_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_assign_notebook_runtime_sync.py new file mode 100644 index 0000000000..762374695c --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_assign_notebook_runtime_sync.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AssignNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_AssignNotebookRuntime_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_assign_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + notebook_runtime = aiplatform_v1.NotebookRuntime() + notebook_runtime.runtime_user = "runtime_user_value" + notebook_runtime.display_name = "display_name_value" + + request = aiplatform_v1.AssignNotebookRuntimeRequest( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=notebook_runtime, + ) + + # Make the request + operation = client.assign_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_AssignNotebookRuntime_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_create_notebook_runtime_template_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_create_notebook_runtime_template_async.py new file mode 100644 index 0000000000..40827d6451 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_create_notebook_runtime_template_async.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateNotebookRuntimeTemplate +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_CreateNotebookRuntimeTemplate_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_create_notebook_runtime_template(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + notebook_runtime_template = aiplatform_v1.NotebookRuntimeTemplate() + notebook_runtime_template.display_name = "display_name_value" + + request = aiplatform_v1.CreateNotebookRuntimeTemplateRequest( + parent="parent_value", + notebook_runtime_template=notebook_runtime_template, + ) + + # Make the request + operation = client.create_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_CreateNotebookRuntimeTemplate_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_create_notebook_runtime_template_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_create_notebook_runtime_template_sync.py new file 
mode 100644 index 0000000000..601a2721eb --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_create_notebook_runtime_template_sync.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateNotebookRuntimeTemplate +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_CreateNotebookRuntimeTemplate_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_create_notebook_runtime_template(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + notebook_runtime_template = aiplatform_v1.NotebookRuntimeTemplate() + notebook_runtime_template.display_name = "display_name_value" + + request = aiplatform_v1.CreateNotebookRuntimeTemplateRequest( + parent="parent_value", + notebook_runtime_template=notebook_runtime_template, + ) + + # Make the request + operation = client.create_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_CreateNotebookRuntimeTemplate_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_runtime_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_runtime_async.py new file mode 100644 index 0000000000..9522412713 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_runtime_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. 
DO NOT EDIT! +# +# Snippet for DeleteNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_DeleteNotebookRuntime_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_delete_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_DeleteNotebookRuntime_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_runtime_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_runtime_sync.py new file mode 100644 index 0000000000..9f9a2ba14c --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_runtime_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_DeleteNotebookRuntime_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_delete_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_DeleteNotebookRuntime_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_async.py new file mode 100644 index 0000000000..2755b05912 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteNotebookRuntimeTemplate +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_DeleteNotebookRuntimeTemplate_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_delete_notebook_runtime_template(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_DeleteNotebookRuntimeTemplate_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_sync.py new file mode 100644 index 0000000000..b7950cba1d --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteNotebookRuntimeTemplate +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_DeleteNotebookRuntimeTemplate_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_delete_notebook_runtime_template(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeleteNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_DeleteNotebookRuntimeTemplate_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_runtime_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_runtime_async.py new file mode 100644 index 0000000000..4a063e4d39 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_runtime_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_GetNotebookRuntime_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_get_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + response = await client.get_notebook_runtime(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_GetNotebookRuntime_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_runtime_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_runtime_sync.py new file mode 100644 index 0000000000..2c76d7d87f --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_runtime_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_GetNotebookRuntime_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_get_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + response = client.get_notebook_runtime(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_GetNotebookRuntime_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_runtime_template_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_runtime_template_async.py new file mode 100644 index 0000000000..7a9ba756da --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_runtime_template_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetNotebookRuntimeTemplate +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_GetNotebookRuntimeTemplate_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_get_notebook_runtime_template(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + response = await client.get_notebook_runtime_template(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_GetNotebookRuntimeTemplate_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_runtime_template_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_runtime_template_sync.py new file mode 100644 index 0000000000..b993ff376c --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_get_notebook_runtime_template_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetNotebookRuntimeTemplate +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_GetNotebookRuntimeTemplate_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_get_notebook_runtime_template(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + response = client.get_notebook_runtime_template(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_GetNotebookRuntimeTemplate_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_runtime_templates_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_runtime_templates_async.py new file mode 100644 index 0000000000..5de9d42f8c --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_runtime_templates_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListNotebookRuntimeTemplates +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_ListNotebookRuntimeTemplates_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_list_notebook_runtime_templates(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListNotebookRuntimeTemplatesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtime_templates(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_NotebookService_ListNotebookRuntimeTemplates_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_runtime_templates_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_runtime_templates_sync.py new file mode 100644 index 0000000000..158817e72c --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_runtime_templates_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListNotebookRuntimeTemplates +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_ListNotebookRuntimeTemplates_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_list_notebook_runtime_templates(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListNotebookRuntimeTemplatesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtime_templates(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_NotebookService_ListNotebookRuntimeTemplates_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_runtimes_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_runtimes_async.py new file mode 100644 index 0000000000..71ef52fbec --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_runtimes_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListNotebookRuntimes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_ListNotebookRuntimes_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_list_notebook_runtimes(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListNotebookRuntimesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtimes(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_NotebookService_ListNotebookRuntimes_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_runtimes_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_runtimes_sync.py new file mode 100644 index 0000000000..f2dba11497 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_list_notebook_runtimes_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListNotebookRuntimes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_ListNotebookRuntimes_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_list_notebook_runtimes(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListNotebookRuntimesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtimes(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_NotebookService_ListNotebookRuntimes_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_start_notebook_runtime_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_start_notebook_runtime_async.py new file mode 100644 index 0000000000..a5f8cbf086 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_start_notebook_runtime_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StartNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_StartNotebookRuntime_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_start_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.StartNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.start_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_StartNotebookRuntime_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_start_notebook_runtime_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_start_notebook_runtime_sync.py new file mode 100644 index 0000000000..08d3547b43 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_start_notebook_runtime_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StartNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_StartNotebookRuntime_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_start_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.StartNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.start_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_StartNotebookRuntime_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_upgrade_notebook_runtime_async.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_upgrade_notebook_runtime_async.py new file mode 100644 index 0000000000..4ed1799644 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_upgrade_notebook_runtime_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpgradeNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_UpgradeNotebookRuntime_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_upgrade_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpgradeNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.upgrade_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_UpgradeNotebookRuntime_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_notebook_service_upgrade_notebook_runtime_sync.py b/samples/generated_samples/aiplatform_v1_generated_notebook_service_upgrade_notebook_runtime_sync.py new file mode 100644 index 0000000000..43c923577c --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_notebook_service_upgrade_notebook_runtime_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpgradeNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_NotebookService_UpgradeNotebookRuntime_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_upgrade_notebook_runtime(): + # Create a client + client = aiplatform_v1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpgradeNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.upgrade_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_NotebookService_UpgradeNotebookRuntime_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_create_persistent_resource_async.py b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_create_persistent_resource_async.py new file mode 100644 index 0000000000..52206aabdf --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_create_persistent_resource_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreatePersistentResource +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PersistentResourceService_CreatePersistentResource_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_create_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreatePersistentResourceRequest( + parent="parent_value", + persistent_resource_id="persistent_resource_id_value", + ) + + # Make the request + operation = client.create_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PersistentResourceService_CreatePersistentResource_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_create_persistent_resource_sync.py b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_create_persistent_resource_sync.py new file mode 100644 index 0000000000..d95377b007 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_create_persistent_resource_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreatePersistentResource +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PersistentResourceService_CreatePersistentResource_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_create_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.CreatePersistentResourceRequest( + parent="parent_value", + persistent_resource_id="persistent_resource_id_value", + ) + + # Make the request + operation = client.create_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PersistentResourceService_CreatePersistentResource_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_delete_persistent_resource_async.py b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_delete_persistent_resource_async.py new file mode 100644 index 0000000000..2aacba0f0c --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_delete_persistent_resource_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeletePersistentResource +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PersistentResourceService_DeletePersistentResource_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_delete_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeletePersistentResourceRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PersistentResourceService_DeletePersistentResource_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_delete_persistent_resource_sync.py b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_delete_persistent_resource_sync.py new file mode 100644 index 0000000000..fccfe5552f --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_delete_persistent_resource_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeletePersistentResource +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PersistentResourceService_DeletePersistentResource_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_delete_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.DeletePersistentResourceRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PersistentResourceService_DeletePersistentResource_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_get_persistent_resource_async.py b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_get_persistent_resource_async.py new file mode 100644 index 0000000000..dcd4fd2ff6 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_get_persistent_resource_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetPersistentResource +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PersistentResourceService_GetPersistentResource_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_get_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetPersistentResourceRequest( + name="name_value", + ) + + # Make the request + response = await client.get_persistent_resource(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PersistentResourceService_GetPersistentResource_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_get_persistent_resource_sync.py b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_get_persistent_resource_sync.py new file mode 100644 index 0000000000..03386dfea1 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_get_persistent_resource_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetPersistentResource +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PersistentResourceService_GetPersistentResource_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_get_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.GetPersistentResourceRequest( + name="name_value", + ) + + # Make the request + response = client.get_persistent_resource(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PersistentResourceService_GetPersistentResource_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_list_persistent_resources_async.py b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_list_persistent_resources_async.py new file mode 100644 index 0000000000..3d6aa42ba2 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_list_persistent_resources_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListPersistentResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PersistentResourceService_ListPersistentResources_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_list_persistent_resources(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListPersistentResourcesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_persistent_resources(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1_generated_PersistentResourceService_ListPersistentResources_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_list_persistent_resources_sync.py b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_list_persistent_resources_sync.py new file mode 100644 index 0000000000..b28f28453c --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_list_persistent_resources_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListPersistentResources +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PersistentResourceService_ListPersistentResources_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_list_persistent_resources(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.ListPersistentResourcesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_persistent_resources(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1_generated_PersistentResourceService_ListPersistentResources_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_reboot_persistent_resource_async.py b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_reboot_persistent_resource_async.py new file mode 100644 index 0000000000..68ea7ac436 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_reboot_persistent_resource_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RebootPersistentResource +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PersistentResourceService_RebootPersistentResource_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_reboot_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.RebootPersistentResourceRequest( + name="name_value", + ) + + # Make the request + operation = client.reboot_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PersistentResourceService_RebootPersistentResource_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_reboot_persistent_resource_sync.py b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_reboot_persistent_resource_sync.py new file mode 100644 index 0000000000..ebf8064b89 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_reboot_persistent_resource_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for RebootPersistentResource +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PersistentResourceService_RebootPersistentResource_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_reboot_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.RebootPersistentResourceRequest( + name="name_value", + ) + + # Make the request + operation = client.reboot_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PersistentResourceService_RebootPersistentResource_sync] diff --git a/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_update_persistent_resource_async.py b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_update_persistent_resource_async.py new file mode 100644 index 0000000000..33ec17e634 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_update_persistent_resource_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdatePersistentResource +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PersistentResourceService_UpdatePersistentResource_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +async def sample_update_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdatePersistentResourceRequest( + ) + + # Make the request + operation = client.update_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PersistentResourceService_UpdatePersistentResource_async] diff --git a/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_update_persistent_resource_sync.py b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_update_persistent_resource_sync.py new file mode 100644 index 0000000000..23717d86a1 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1_generated_persistent_resource_service_update_persistent_resource_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdatePersistentResource +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1_generated_PersistentResourceService_UpdatePersistentResource_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1 + + +def sample_update_persistent_resource(): + # Create a client + client = aiplatform_v1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1.UpdatePersistentResourceRequest( + ) + + # Make the request + operation = client.update_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1_generated_PersistentResourceService_UpdatePersistentResource_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_evaluation_service_evaluate_instances_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_evaluation_service_evaluate_instances_async.py new file mode 100644 index 0000000000..0494c76a86 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_evaluation_service_evaluate_instances_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for EvaluateInstances +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EvaluationService_EvaluateInstances_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_evaluate_instances(): + # Create a client + client = aiplatform_v1beta1.EvaluationServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.EvaluateInstancesRequest( + location="location_value", + ) + + # Make the request + response = await client.evaluate_instances(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EvaluationService_EvaluateInstances_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_evaluation_service_evaluate_instances_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_evaluation_service_evaluate_instances_sync.py new file mode 100644 index 0000000000..6a327588ab --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_evaluation_service_evaluate_instances_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for EvaluateInstances +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_EvaluationService_EvaluateInstances_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_evaluate_instances(): + # Create a client + client = aiplatform_v1beta1.EvaluationServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.EvaluateInstancesRequest( + location="location_value", + ) + + # Make the request + response = client.evaluate_instances(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_EvaluationService_EvaluateInstances_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_execution_service_execute_extension_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_execution_service_execute_extension_async.py new file mode 100644 index 0000000000..c2e78fe65d --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_execution_service_execute_extension_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExecuteExtension +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionExecutionService_ExecuteExtension_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_execute_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionExecutionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExecuteExtensionRequest( + name="name_value", + operation_id="operation_id_value", + ) + + # Make the request + response = await client.execute_extension(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionExecutionService_ExecuteExtension_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_execution_service_execute_extension_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_execution_service_execute_extension_sync.py new file mode 100644 index 0000000000..efd1167fab --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_execution_service_execute_extension_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ExecuteExtension +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionExecutionService_ExecuteExtension_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_execute_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionExecutionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ExecuteExtensionRequest( + name="name_value", + operation_id="operation_id_value", + ) + + # Make the request + response = client.execute_extension(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionExecutionService_ExecuteExtension_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_execution_service_query_extension_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_execution_service_query_extension_async.py new file mode 100644 index 0000000000..38536d0126 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_execution_service_query_extension_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryExtension +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionExecutionService_QueryExtension_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_query_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionExecutionServiceAsyncClient() + + # Initialize request argument(s) + contents = aiplatform_v1beta1.Content() + contents.parts.text = "text_value" + + request = aiplatform_v1beta1.QueryExtensionRequest( + name="name_value", + contents=contents, + ) + + # Make the request + response = await client.query_extension(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionExecutionService_QueryExtension_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_execution_service_query_extension_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_execution_service_query_extension_sync.py new file mode 100644 index 0000000000..a73e025225 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_execution_service_query_extension_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for QueryExtension +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionExecutionService_QueryExtension_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_query_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionExecutionServiceClient() + + # Initialize request argument(s) + contents = aiplatform_v1beta1.Content() + contents.parts.text = "text_value" + + request = aiplatform_v1beta1.QueryExtensionRequest( + name="name_value", + contents=contents, + ) + + # Make the request + response = client.query_extension(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionExecutionService_QueryExtension_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_delete_extension_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_delete_extension_async.py new file mode 100644 index 0000000000..d814b7e96c --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_delete_extension_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteExtension +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionRegistryService_DeleteExtension_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteExtensionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_extension(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionRegistryService_DeleteExtension_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_delete_extension_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_delete_extension_sync.py new file mode 100644 index 0000000000..a63a2a0f66 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_delete_extension_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteExtension +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionRegistryService_DeleteExtension_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteExtensionRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_extension(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionRegistryService_DeleteExtension_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_get_extension_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_get_extension_async.py new file mode 100644 index 0000000000..aef177872f --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_get_extension_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetExtension +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionRegistryService_GetExtension_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetExtensionRequest( + name="name_value", + ) + + # Make the request + response = await client.get_extension(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionRegistryService_GetExtension_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_get_extension_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_get_extension_sync.py new file mode 100644 index 0000000000..c615ebaaaf --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_get_extension_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetExtension +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionRegistryService_GetExtension_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_get_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetExtensionRequest( + name="name_value", + ) + + # Make the request + response = client.get_extension(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionRegistryService_GetExtension_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_import_extension_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_import_extension_async.py new file mode 100644 index 0000000000..cf89378168 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_import_extension_async.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportExtension +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionRegistryService_ImportExtension_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_import_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient() + + # Initialize request argument(s) + extension = aiplatform_v1beta1.Extension() + extension.display_name = "display_name_value" + extension.manifest.name = "name_value" + extension.manifest.description = "description_value" + extension.manifest.api_spec.open_api_yaml = "open_api_yaml_value" + extension.manifest.auth_config.api_key_config.name = "name_value" + extension.manifest.auth_config.api_key_config.api_key_secret = "api_key_secret_value" + extension.manifest.auth_config.api_key_config.http_element_location = "HTTP_IN_COOKIE" + + request = aiplatform_v1beta1.ImportExtensionRequest( + parent="parent_value", + extension=extension, + ) + + # Make the request + operation = client.import_extension(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionRegistryService_ImportExtension_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_import_extension_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_import_extension_sync.py new file mode 100644 index 0000000000..4f1315e352 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_import_extension_sync.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportExtension +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionRegistryService_ImportExtension_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_import_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceClient() + + # Initialize request argument(s) + extension = aiplatform_v1beta1.Extension() + extension.display_name = "display_name_value" + extension.manifest.name = "name_value" + extension.manifest.description = "description_value" + extension.manifest.api_spec.open_api_yaml = "open_api_yaml_value" + extension.manifest.auth_config.api_key_config.name = "name_value" + extension.manifest.auth_config.api_key_config.api_key_secret = "api_key_secret_value" + extension.manifest.auth_config.api_key_config.http_element_location = "HTTP_IN_COOKIE" + + request = aiplatform_v1beta1.ImportExtensionRequest( + parent="parent_value", + extension=extension, + ) + + # Make the request + operation = client.import_extension(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionRegistryService_ImportExtension_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_list_extensions_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_list_extensions_async.py new file mode 100644 index 0000000000..4b41bf154e --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_list_extensions_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListExtensions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionRegistryService_ListExtensions_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_extensions(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListExtensionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_extensions(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionRegistryService_ListExtensions_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_list_extensions_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_list_extensions_sync.py new file mode 100644 index 0000000000..92e7d2714d --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_list_extensions_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListExtensions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionRegistryService_ListExtensions_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_list_extensions(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListExtensionsRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_extensions(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionRegistryService_ListExtensions_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_update_extension_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_update_extension_async.py new file mode 100644 index 0000000000..503ed231da --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_update_extension_async.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExtension +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionRegistryService_UpdateExtension_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_update_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient() + + # Initialize request argument(s) + extension = aiplatform_v1beta1.Extension() + extension.display_name = "display_name_value" + extension.manifest.name = "name_value" + extension.manifest.description = "description_value" + extension.manifest.api_spec.open_api_yaml = "open_api_yaml_value" + extension.manifest.auth_config.api_key_config.name = "name_value" + extension.manifest.auth_config.api_key_config.api_key_secret = "api_key_secret_value" + extension.manifest.auth_config.api_key_config.http_element_location = "HTTP_IN_COOKIE" + + request = aiplatform_v1beta1.UpdateExtensionRequest( + extension=extension, + ) + + # Make the request + response = await client.update_extension(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionRegistryService_UpdateExtension_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_update_extension_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_update_extension_sync.py new file mode 100644 index 0000000000..d4393afad3 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_extension_registry_service_update_extension_sync.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpdateExtension +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_ExtensionRegistryService_UpdateExtension_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_update_extension(): + # Create a client + client = aiplatform_v1beta1.ExtensionRegistryServiceClient() + + # Initialize request argument(s) + extension = aiplatform_v1beta1.Extension() + extension.display_name = "display_name_value" + extension.manifest.name = "name_value" + extension.manifest.description = "description_value" + extension.manifest.api_spec.open_api_yaml = "open_api_yaml_value" + extension.manifest.auth_config.api_key_config.name = "name_value" + extension.manifest.auth_config.api_key_config.api_key_secret = "api_key_secret_value" + extension.manifest.auth_config.api_key_config.http_element_location = "HTTP_IN_COOKIE" + + request = aiplatform_v1beta1.UpdateExtensionRequest( + extension=extension, + ) + + # Make the request + response = client.update_extension(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_ExtensionRegistryService_UpdateExtension_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_assign_notebook_runtime_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_assign_notebook_runtime_async.py new file mode 100644 index 0000000000..9f785edaf0 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_assign_notebook_runtime_async.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AssignNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_AssignNotebookRuntime_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_assign_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + notebook_runtime = aiplatform_v1beta1.NotebookRuntime() + notebook_runtime.runtime_user = "runtime_user_value" + notebook_runtime.display_name = "display_name_value" + + request = aiplatform_v1beta1.AssignNotebookRuntimeRequest( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=notebook_runtime, + ) + + # Make the request + operation = client.assign_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_AssignNotebookRuntime_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_assign_notebook_runtime_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_assign_notebook_runtime_sync.py new file mode 100644 index 0000000000..269a60e4d3 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_assign_notebook_runtime_sync.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AssignNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_AssignNotebookRuntime_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_assign_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + notebook_runtime = aiplatform_v1beta1.NotebookRuntime() + notebook_runtime.runtime_user = "runtime_user_value" + notebook_runtime.display_name = "display_name_value" + + request = aiplatform_v1beta1.AssignNotebookRuntimeRequest( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=notebook_runtime, + ) + + # Make the request + operation = client.assign_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_AssignNotebookRuntime_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_create_notebook_runtime_template_async.py 
b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_create_notebook_runtime_template_async.py new file mode 100644 index 0000000000..cb3edc7580 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_create_notebook_runtime_template_async.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateNotebookRuntimeTemplate +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_CreateNotebookRuntimeTemplate_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_notebook_runtime_template(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + notebook_runtime_template = aiplatform_v1beta1.NotebookRuntimeTemplate() + notebook_runtime_template.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateNotebookRuntimeTemplateRequest( + parent="parent_value", + notebook_runtime_template=notebook_runtime_template, + ) + + # Make the request + operation = client.create_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_CreateNotebookRuntimeTemplate_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_create_notebook_runtime_template_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_create_notebook_runtime_template_sync.py new file mode 100644 index 0000000000..81d766166f --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_create_notebook_runtime_template_sync.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateNotebookRuntimeTemplate +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_CreateNotebookRuntimeTemplate_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_create_notebook_runtime_template(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + notebook_runtime_template = aiplatform_v1beta1.NotebookRuntimeTemplate() + notebook_runtime_template.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateNotebookRuntimeTemplateRequest( + parent="parent_value", + notebook_runtime_template=notebook_runtime_template, + ) + + # Make the request + operation = client.create_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_CreateNotebookRuntimeTemplate_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_async.py new file 
mode 100644 index 0000000000..27c937aafc --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_DeleteNotebookRuntime_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_DeleteNotebookRuntime_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_sync.py new file mode 100644 index 0000000000..c4614d6ce6 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_DeleteNotebookRuntime_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_DeleteNotebookRuntime_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_template_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_template_async.py new file mode 100644 index 0000000000..54c77cc5fa --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_template_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteNotebookRuntimeTemplate +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_DeleteNotebookRuntimeTemplate_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_notebook_runtime_template(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_DeleteNotebookRuntimeTemplate_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_template_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_template_sync.py new file mode 100644 index 0000000000..86ff70bb7b --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_template_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for DeleteNotebookRuntimeTemplate +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_DeleteNotebookRuntimeTemplate_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_notebook_runtime_template(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_notebook_runtime_template(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_DeleteNotebookRuntimeTemplate_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_async.py new file mode 100644 index 0000000000..7e204018b6 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_GetNotebookRuntime_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + response = await client.get_notebook_runtime(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_GetNotebookRuntime_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_sync.py new file mode 100644 index 0000000000..4c5977c3be --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_GetNotebookRuntime_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_get_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + response = client.get_notebook_runtime(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_GetNotebookRuntime_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_template_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_template_async.py new file mode 100644 index 0000000000..f8a01c132f --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_template_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetNotebookRuntimeTemplate +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_GetNotebookRuntimeTemplate_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_notebook_runtime_template(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + response = await client.get_notebook_runtime_template(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_GetNotebookRuntimeTemplate_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_template_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_template_sync.py new file mode 100644 index 0000000000..ee41439f55 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_template_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetNotebookRuntimeTemplate +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_GetNotebookRuntimeTemplate_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_get_notebook_runtime_template(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Make the request + response = client.get_notebook_runtime_template(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_GetNotebookRuntimeTemplate_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_list_notebook_runtime_templates_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_list_notebook_runtime_templates_async.py new file mode 100644 index 0000000000..88b2f7f8e1 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_list_notebook_runtime_templates_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListNotebookRuntimeTemplates +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_ListNotebookRuntimeTemplates_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_notebook_runtime_templates(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListNotebookRuntimeTemplatesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtime_templates(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_ListNotebookRuntimeTemplates_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_list_notebook_runtime_templates_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_list_notebook_runtime_templates_sync.py new file mode 100644 index 0000000000..7cdd588dd3 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_list_notebook_runtime_templates_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListNotebookRuntimeTemplates +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_ListNotebookRuntimeTemplates_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_list_notebook_runtime_templates(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListNotebookRuntimeTemplatesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtime_templates(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_ListNotebookRuntimeTemplates_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_list_notebook_runtimes_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_list_notebook_runtimes_async.py new file mode 100644 index 0000000000..66ab650eb8 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_list_notebook_runtimes_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListNotebookRuntimes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_ListNotebookRuntimes_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_notebook_runtimes(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListNotebookRuntimesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtimes(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_ListNotebookRuntimes_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_list_notebook_runtimes_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_list_notebook_runtimes_sync.py new file mode 100644 index 0000000000..f8b5443dfe --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_list_notebook_runtimes_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListNotebookRuntimes +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_ListNotebookRuntimes_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_list_notebook_runtimes(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListNotebookRuntimesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_notebook_runtimes(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_ListNotebookRuntimes_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_start_notebook_runtime_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_start_notebook_runtime_async.py new file mode 100644 index 0000000000..6fa57b4d5a --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_start_notebook_runtime_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StartNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_StartNotebookRuntime_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_start_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.StartNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.start_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_StartNotebookRuntime_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_start_notebook_runtime_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_start_notebook_runtime_sync.py new file mode 100644 index 0000000000..4be795b5a0 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_start_notebook_runtime_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for StartNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_StartNotebookRuntime_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_start_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.StartNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.start_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_StartNotebookRuntime_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_upgrade_notebook_runtime_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_upgrade_notebook_runtime_async.py new file mode 100644 index 0000000000..81ef3ad901 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_upgrade_notebook_runtime_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpgradeNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_UpgradeNotebookRuntime_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_upgrade_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpgradeNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.upgrade_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_UpgradeNotebookRuntime_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_upgrade_notebook_runtime_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_upgrade_notebook_runtime_sync.py new file mode 100644 index 0000000000..849fc4e4ab --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_notebook_service_upgrade_notebook_runtime_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UpgradeNotebookRuntime +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_NotebookService_UpgradeNotebookRuntime_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_upgrade_notebook_runtime(): + # Create a client + client = aiplatform_v1beta1.NotebookServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.UpgradeNotebookRuntimeRequest( + name="name_value", + ) + + # Make the request + operation = client.upgrade_notebook_runtime(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_NotebookService_UpgradeNotebookRuntime_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_persistent_resource_service_reboot_persistent_resource_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_persistent_resource_service_reboot_persistent_resource_async.py new file mode 100644 index 0000000000..f039e344b4 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_persistent_resource_service_reboot_persistent_resource_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RebootPersistentResource +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PersistentResourceService_RebootPersistentResource_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_reboot_persistent_resource(): + # Create a client + client = aiplatform_v1beta1.PersistentResourceServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.RebootPersistentResourceRequest( + name="name_value", + ) + + # Make the request + operation = client.reboot_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PersistentResourceService_RebootPersistentResource_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_persistent_resource_service_reboot_persistent_resource_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_persistent_resource_service_reboot_persistent_resource_sync.py new file mode 100644 index 0000000000..0ae5e10659 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_persistent_resource_service_reboot_persistent_resource_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for RebootPersistentResource +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PersistentResourceService_RebootPersistentResource_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_reboot_persistent_resource(): + # Create a client + client = aiplatform_v1beta1.PersistentResourceServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.RebootPersistentResourceRequest( + name="name_value", + ) + + # Make the request + operation = client.reboot_persistent_resource(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_PersistentResourceService_RebootPersistentResource_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_chat_completions_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_chat_completions_async.py new file mode 100644 index 0000000000..9841f6b911 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_chat_completions_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ChatCompletions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PredictionService_ChatCompletions_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_chat_completions(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ChatCompletionsRequest( + endpoint="endpoint_value", + ) + + # Make the request + stream = await client.chat_completions(request=request) + + # Handle the response + async for response in stream: + print(response) + +# [END aiplatform_v1beta1_generated_PredictionService_ChatCompletions_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_chat_completions_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_chat_completions_sync.py new file mode 100644 index 0000000000..fa56c559aa --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_prediction_service_chat_completions_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ChatCompletions +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_PredictionService_ChatCompletions_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_chat_completions(): + # Create a client + client = aiplatform_v1beta1.PredictionServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ChatCompletionsRequest( + endpoint="endpoint_value", + ) + + # Make the request + stream = client.chat_completions(request=request) + + # Handle the response + for response in stream: + print(response) + +# [END aiplatform_v1beta1_generated_PredictionService_ChatCompletions_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_create_rag_corpus_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_create_rag_corpus_async.py new file mode 100644 index 0000000000..6daba76ebc --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_create_rag_corpus_async.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateRagCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_CreateRagCorpus_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_create_rag_corpus(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + rag_corpus = aiplatform_v1beta1.RagCorpus() + rag_corpus.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateRagCorpusRequest( + parent="parent_value", + rag_corpus=rag_corpus, + ) + + # Make the request + operation = client.create_rag_corpus(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_CreateRagCorpus_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_create_rag_corpus_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_create_rag_corpus_sync.py new file mode 100644 index 0000000000..e76eb77d01 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_create_rag_corpus_sync.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateRagCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_CreateRagCorpus_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_create_rag_corpus(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + rag_corpus = aiplatform_v1beta1.RagCorpus() + rag_corpus.display_name = "display_name_value" + + request = aiplatform_v1beta1.CreateRagCorpusRequest( + parent="parent_value", + rag_corpus=rag_corpus, + ) + + # Make the request + operation = client.create_rag_corpus(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_CreateRagCorpus_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_corpus_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_corpus_async.py new file mode 100644 index 0000000000..012d6a274d --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_corpus_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteRagCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_DeleteRagCorpus_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_rag_corpus(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteRagCorpusRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_rag_corpus(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_DeleteRagCorpus_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_corpus_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_corpus_sync.py new file mode 100644 index 0000000000..3612ab4d60 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_corpus_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteRagCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_DeleteRagCorpus_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_rag_corpus(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteRagCorpusRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_rag_corpus(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_DeleteRagCorpus_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_file_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_file_async.py new file mode 100644 index 0000000000..7c4cc94156 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_file_async.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteRagFile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_DeleteRagFile_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_delete_rag_file(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteRagFileRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_rag_file(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_DeleteRagFile_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_file_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_file_sync.py new file mode 100644 index 0000000000..36c85d453a --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_file_sync.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for DeleteRagFile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_DeleteRagFile_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_delete_rag_file(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.DeleteRagFileRequest( + name="name_value", + ) + + # Make the request + operation = client.delete_rag_file(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_DeleteRagFile_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_corpus_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_corpus_async.py new file mode 100644 index 0000000000..5bb4d8354e --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_corpus_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetRagCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_GetRagCorpus_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_rag_corpus(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetRagCorpusRequest( + name="name_value", + ) + + # Make the request + response = await client.get_rag_corpus(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_GetRagCorpus_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_corpus_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_corpus_sync.py new file mode 100644 index 0000000000..182b53a49b --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_corpus_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetRagCorpus +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_GetRagCorpus_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_get_rag_corpus(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetRagCorpusRequest( + name="name_value", + ) + + # Make the request + response = client.get_rag_corpus(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_GetRagCorpus_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_file_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_file_async.py new file mode 100644 index 0000000000..ae0e4d9586 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_file_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetRagFile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_GetRagFile_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_get_rag_file(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetRagFileRequest( + name="name_value", + ) + + # Make the request + response = await client.get_rag_file(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_GetRagFile_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_file_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_file_sync.py new file mode 100644 index 0000000000..58611e1ec6 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_file_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetRagFile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_GetRagFile_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_get_rag_file(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.GetRagFileRequest( + name="name_value", + ) + + # Make the request + response = client.get_rag_file(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_GetRagFile_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_import_rag_files_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_import_rag_files_async.py new file mode 100644 index 0000000000..8ae50a697c --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_import_rag_files_async.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportRagFiles +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_ImportRagFiles_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_import_rag_files(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + import_rag_files_config = aiplatform_v1beta1.ImportRagFilesConfig() + import_rag_files_config.gcs_source.uris = ['uris_value1', 'uris_value2'] + + request = aiplatform_v1beta1.ImportRagFilesRequest( + parent="parent_value", + import_rag_files_config=import_rag_files_config, + ) + + # Make the request + operation = client.import_rag_files(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_ImportRagFiles_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_import_rag_files_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_import_rag_files_sync.py new file mode 100644 index 0000000000..0733c28d9f --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_import_rag_files_sync.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ImportRagFiles +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_ImportRagFiles_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_import_rag_files(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + import_rag_files_config = aiplatform_v1beta1.ImportRagFilesConfig() + import_rag_files_config.gcs_source.uris = ['uris_value1', 'uris_value2'] + + request = aiplatform_v1beta1.ImportRagFilesRequest( + parent="parent_value", + import_rag_files_config=import_rag_files_config, + ) + + # Make the request + operation = client.import_rag_files(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_ImportRagFiles_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_corpora_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_corpora_async.py new file mode 100644 index 0000000000..0b86ca221b --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_corpora_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for ListRagCorpora +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_ListRagCorpora_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_rag_corpora(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListRagCorporaRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_rag_corpora(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_ListRagCorpora_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_corpora_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_corpora_sync.py new file mode 100644 index 0000000000..700ab95372 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_corpora_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListRagCorpora +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_ListRagCorpora_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_list_rag_corpora(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListRagCorporaRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_rag_corpora(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_ListRagCorpora_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_files_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_files_async.py new file mode 100644 index 0000000000..862b658f21 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_files_async.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListRagFiles +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_ListRagFiles_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_list_rag_files(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListRagFilesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_rag_files(request=request) + + # Handle the response + async for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_ListRagFiles_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_files_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_files_sync.py new file mode 100644 index 0000000000..36907a23bb --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_files_sync.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ListRagFiles +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_ListRagFiles_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_list_rag_files(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + request = aiplatform_v1beta1.ListRagFilesRequest( + parent="parent_value", + ) + + # Make the request + page_result = client.list_rag_files(request=request) + + # Handle the response + for response in page_result: + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_ListRagFiles_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_upload_rag_file_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_upload_rag_file_async.py new file mode 100644 index 0000000000..60d05dcad1 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_upload_rag_file_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UploadRagFile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. 
+ +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_UploadRagFile_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_upload_rag_file(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceAsyncClient() + + # Initialize request argument(s) + rag_file = aiplatform_v1beta1.RagFile() + rag_file.gcs_source.uris = ['uris_value1', 'uris_value2'] + rag_file.display_name = "display_name_value" + + request = aiplatform_v1beta1.UploadRagFileRequest( + parent="parent_value", + rag_file=rag_file, + ) + + # Make the request + response = await client.upload_rag_file(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_UploadRagFile_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_upload_rag_file_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_upload_rag_file_sync.py new file mode 100644 index 0000000000..2715dd2a1e --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_data_service_upload_rag_file_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for UploadRagFile +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagDataService_UploadRagFile_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_upload_rag_file(): + # Create a client + client = aiplatform_v1beta1.VertexRagDataServiceClient() + + # Initialize request argument(s) + rag_file = aiplatform_v1beta1.RagFile() + rag_file.gcs_source.uris = ['uris_value1', 'uris_value2'] + rag_file.display_name = "display_name_value" + + request = aiplatform_v1beta1.UploadRagFileRequest( + parent="parent_value", + rag_file=rag_file, + ) + + # Make the request + response = client.upload_rag_file(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagDataService_UploadRagFile_sync] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_service_retrieve_contexts_async.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_service_retrieve_contexts_async.py new file mode 100644 index 0000000000..7505fd1e36 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_service_retrieve_contexts_async.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RetrieveContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. 
+# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagService_RetrieveContexts_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +async def sample_retrieve_contexts(): + # Create a client + client = aiplatform_v1beta1.VertexRagServiceAsyncClient() + + # Initialize request argument(s) + vertex_rag_store = aiplatform_v1beta1.VertexRagStore() + vertex_rag_store.rag_corpora = ['rag_corpora_value1', 'rag_corpora_value2'] + + query = aiplatform_v1beta1.RagQuery() + query.text = "text_value" + + request = aiplatform_v1beta1.RetrieveContextsRequest( + vertex_rag_store=vertex_rag_store, + parent="parent_value", + query=query, + ) + + # Make the request + response = await client.retrieve_contexts(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagService_RetrieveContexts_async] diff --git a/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_service_retrieve_contexts_sync.py b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_service_retrieve_contexts_sync.py new file mode 100644 index 0000000000..987da38bf0 --- /dev/null +++ b/samples/generated_samples/aiplatform_v1beta1_generated_vertex_rag_service_retrieve_contexts_sync.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for RetrieveContexts +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-aiplatform + + +# [START aiplatform_v1beta1_generated_VertexRagService_RetrieveContexts_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import aiplatform_v1beta1 + + +def sample_retrieve_contexts(): + # Create a client + client = aiplatform_v1beta1.VertexRagServiceClient() + + # Initialize request argument(s) + vertex_rag_store = aiplatform_v1beta1.VertexRagStore() + vertex_rag_store.rag_corpora = ['rag_corpora_value1', 'rag_corpora_value2'] + + query = aiplatform_v1beta1.RagQuery() + query.text = "text_value" + + request = aiplatform_v1beta1.RetrieveContextsRequest( + vertex_rag_store=vertex_rag_store, + parent="parent_value", + query=query, + ) + + # Make the request + response = client.retrieve_contexts(request=request) + + # Handle the response + print(response) + +# [END aiplatform_v1beta1_generated_VertexRagService_RetrieveContexts_sync] diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index 87387cc762..9e5480e1de 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.46.0" + "version": "1.47.0" }, "snippets": [ { @@ -13240,31 +13240,27 @@ "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", - "shortName": "IndexEndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceAsyncClient", + "shortName": "GenAiTuningServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.create_index_endpoint", + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceAsyncClient.cancel_tuning_job", "method": { - "fullName": 
"google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService.CancelTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService", + "shortName": "GenAiTuningService" }, - "shortName": "CreateIndexEndpoint" + "shortName": "CancelTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.CancelTuningJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "index_endpoint", - "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -13278,22 +13274,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_index_endpoint" + "shortName": "cancel_tuning_job" }, - "description": "Sample for CreateIndexEndpoint", - "file": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_async.py", + "description": "Sample for CancelTuningJob", + "file": "aiplatform_v1_generated_gen_ai_tuning_service_cancel_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_CreateIndexEndpoint_async", + "regionTag": "aiplatform_v1_generated_GenAiTuningService_CancelTuningJob_async", "segments": [ { - "end": 59, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 49, "start": 27, "type": "SHORT" }, @@ -13303,52 +13298,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_async.py" + "title": "aiplatform_v1_generated_gen_ai_tuning_service_cancel_tuning_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", - "shortName": "IndexEndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceClient", + "shortName": "GenAiTuningServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.create_index_endpoint", + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceClient.cancel_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService.CancelTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService", + "shortName": "GenAiTuningService" }, - "shortName": "CreateIndexEndpoint" + "shortName": "CancelTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.CancelTuningJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "index_endpoint", - "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -13362,22 +13351,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_index_endpoint" + "shortName": "cancel_tuning_job" }, - "description": "Sample for CreateIndexEndpoint", - "file": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_sync.py", + "description": "Sample for CancelTuningJob", + "file": "aiplatform_v1_generated_gen_ai_tuning_service_cancel_tuning_job_sync.py", "language": "PYTHON", "origin": 
"API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_CreateIndexEndpoint_sync", + "regionTag": "aiplatform_v1_generated_GenAiTuningService_CancelTuningJob_sync", "segments": [ { - "end": 59, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 49, "start": 27, "type": "SHORT" }, @@ -13387,49 +13375,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_sync.py" + "title": "aiplatform_v1_generated_gen_ai_tuning_service_cancel_tuning_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", - "shortName": "IndexEndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceAsyncClient", + "shortName": "GenAiTuningServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.delete_index_endpoint", + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceAsyncClient.create_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService.CreateTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService", + "shortName": "GenAiTuningService" }, - "shortName": "DeleteIndexEndpoint" + "shortName": "CreateTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.CreateTuningJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + 
{ + "name": "tuning_job", + "type": "google.cloud.aiplatform_v1.types.TuningJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -13443,22 +13433,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_index_endpoint" + "resultType": "google.cloud.aiplatform_v1.types.TuningJob", + "shortName": "create_tuning_job" }, - "description": "Sample for DeleteIndexEndpoint", - "file": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_async.py", + "description": "Sample for CreateTuningJob", + "file": "aiplatform_v1_generated_gen_ai_tuning_service_create_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeleteIndexEndpoint_async", + "regionTag": "aiplatform_v1_generated_GenAiTuningService_CreateTuningJob_async", "segments": [ { - "end": 55, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 56, "start": 27, "type": "SHORT" }, @@ -13468,48 +13458,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_async.py" + "title": "aiplatform_v1_generated_gen_ai_tuning_service_create_tuning_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", - "shortName": "IndexEndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceClient", + "shortName": "GenAiTuningServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.delete_index_endpoint", + "fullName": 
"google.cloud.aiplatform_v1.GenAiTuningServiceClient.create_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService.CreateTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService", + "shortName": "GenAiTuningService" }, - "shortName": "DeleteIndexEndpoint" + "shortName": "CreateTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.CreateTuningJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "tuning_job", + "type": "google.cloud.aiplatform_v1.types.TuningJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -13523,22 +13517,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_index_endpoint" + "resultType": "google.cloud.aiplatform_v1.types.TuningJob", + "shortName": "create_tuning_job" }, - "description": "Sample for DeleteIndexEndpoint", - "file": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_sync.py", + "description": "Sample for CreateTuningJob", + "file": "aiplatform_v1_generated_gen_ai_tuning_service_create_tuning_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeleteIndexEndpoint_sync", + "regionTag": "aiplatform_v1_generated_GenAiTuningService_CreateTuningJob_sync", "segments": [ { - "end": 55, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 56, "start": 27, "type": "SHORT" }, @@ -13548,53 +13542,49 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 53, + "start": 
51, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_sync.py" + "title": "aiplatform_v1_generated_gen_ai_tuning_service_create_tuning_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", - "shortName": "IndexEndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceAsyncClient", + "shortName": "GenAiTuningServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.deploy_index", + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceAsyncClient.get_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex", + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService.GetTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService", + "shortName": "GenAiTuningService" }, - "shortName": "DeployIndex" + "shortName": "GetTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeployIndexRequest" + "type": "google.cloud.aiplatform_v1.types.GetTuningJobRequest" }, { - "name": "index_endpoint", + "name": "name", "type": "str" }, - { - "name": "deployed_index", - "type": "google.cloud.aiplatform_v1.types.DeployedIndex" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -13608,22 +13598,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "deploy_index" + "resultType": "google.cloud.aiplatform_v1.types.TuningJob", + "shortName": "get_tuning_job" }, - "description": "Sample for DeployIndex", - "file": 
"aiplatform_v1_generated_index_endpoint_service_deploy_index_async.py", + "description": "Sample for GetTuningJob", + "file": "aiplatform_v1_generated_gen_ai_tuning_service_get_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeployIndex_async", + "regionTag": "aiplatform_v1_generated_GenAiTuningService_GetTuningJob_async", "segments": [ { - "end": 60, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 51, "start": 27, "type": "SHORT" }, @@ -13633,52 +13623,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 51, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_endpoint_service_deploy_index_async.py" + "title": "aiplatform_v1_generated_gen_ai_tuning_service_get_tuning_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", - "shortName": "IndexEndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceClient", + "shortName": "GenAiTuningServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.deploy_index", + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceClient.get_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex", + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService.GetTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService", + "shortName": "GenAiTuningService" }, - "shortName": "DeployIndex" + "shortName": "GetTuningJob" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.DeployIndexRequest" + "type": "google.cloud.aiplatform_v1.types.GetTuningJobRequest" }, { - "name": "index_endpoint", + "name": "name", "type": "str" }, - { - "name": "deployed_index", - "type": "google.cloud.aiplatform_v1.types.DeployedIndex" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -13692,22 +13678,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "deploy_index" + "resultType": "google.cloud.aiplatform_v1.types.TuningJob", + "shortName": "get_tuning_job" }, - "description": "Sample for DeployIndex", - "file": "aiplatform_v1_generated_index_endpoint_service_deploy_index_sync.py", + "description": "Sample for GetTuningJob", + "file": "aiplatform_v1_generated_gen_ai_tuning_service_get_tuning_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeployIndex_sync", + "regionTag": "aiplatform_v1_generated_GenAiTuningService_GetTuningJob_sync", "segments": [ { - "end": 60, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 51, "start": 27, "type": "SHORT" }, @@ -13717,47 +13703,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 51, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_endpoint_service_deploy_index_sync.py" + "title": "aiplatform_v1_generated_gen_ai_tuning_service_get_tuning_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", - "shortName": "IndexEndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceAsyncClient", + "shortName": "GenAiTuningServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.get_index_endpoint", + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceAsyncClient.list_tuning_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService.ListTuningJobs", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService", + "shortName": "GenAiTuningService" }, - "shortName": "GetIndexEndpoint" + "shortName": "ListTuningJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.ListTuningJobsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -13773,22 +13759,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", - "shortName": "get_index_endpoint" + "resultType": "google.cloud.aiplatform_v1.services.gen_ai_tuning_service.pagers.ListTuningJobsAsyncPager", + "shortName": "list_tuning_jobs" }, - "description": "Sample for GetIndexEndpoint", - "file": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_async.py", + "description": "Sample for ListTuningJobs", + "file": "aiplatform_v1_generated_gen_ai_tuning_service_list_tuning_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_GetIndexEndpoint_async", + "regionTag": "aiplatform_v1_generated_GenAiTuningService_ListTuningJobs_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -13808,36 +13794,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_async.py" + "title": "aiplatform_v1_generated_gen_ai_tuning_service_list_tuning_jobs_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", - "shortName": "IndexEndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceClient", + "shortName": "GenAiTuningServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.get_index_endpoint", + "fullName": "google.cloud.aiplatform_v1.GenAiTuningServiceClient.list_tuning_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService.ListTuningJobs", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1.GenAiTuningService", + "shortName": "GenAiTuningService" }, - "shortName": "GetIndexEndpoint" + "shortName": "ListTuningJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.ListTuningJobsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -13853,22 +13839,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", - "shortName": "get_index_endpoint" + "resultType": "google.cloud.aiplatform_v1.services.gen_ai_tuning_service.pagers.ListTuningJobsPager", + "shortName": "list_tuning_jobs" }, - "description": "Sample for GetIndexEndpoint", - "file": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_sync.py", + "description": "Sample for ListTuningJobs", + "file": "aiplatform_v1_generated_gen_ai_tuning_service_list_tuning_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_IndexEndpointService_GetIndexEndpoint_sync", + "regionTag": "aiplatform_v1_generated_GenAiTuningService_ListTuningJobs_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -13888,12 +13874,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_sync.py" + "title": "aiplatform_v1_generated_gen_ai_tuning_service_list_tuning_jobs_sync.py" }, { "canonical": true, @@ -13903,24 +13889,28 @@ "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.list_index_endpoints", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.create_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint", "service": { "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, - "shortName": "ListIndexEndpoints" + "shortName": "CreateIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest" + "type": "google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest" }, { "name": "parent", "type": "str" }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -13934,22 +13924,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager", - "shortName": "list_index_endpoints" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": 
"create_index_endpoint" }, - "description": "Sample for ListIndexEndpoints", - "file": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_async.py", + "description": "Sample for CreateIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_ListIndexEndpoints_async", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_CreateIndexEndpoint_async", "segments": [ { - "end": 52, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 59, "start": 27, "type": "SHORT" }, @@ -13959,22 +13949,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_async.py" + "title": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_async.py" }, { "canonical": true, @@ -13983,24 +13973,28 @@ "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.list_index_endpoints", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.create_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.CreateIndexEndpoint", "service": { "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, - "shortName": "ListIndexEndpoints" + "shortName": "CreateIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest" + 
"type": "google.cloud.aiplatform_v1.types.CreateIndexEndpointRequest" }, { "name": "parent", "type": "str" }, + { + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14014,22 +14008,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.index_endpoint_service.pagers.ListIndexEndpointsPager", - "shortName": "list_index_endpoints" + "resultType": "google.api_core.operation.Operation", + "shortName": "create_index_endpoint" }, - "description": "Sample for ListIndexEndpoints", - "file": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_sync.py", + "description": "Sample for CreateIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_ListIndexEndpoints_sync", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_CreateIndexEndpoint_sync", "segments": [ { - "end": 52, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 59, "start": 27, "type": "SHORT" }, @@ -14039,22 +14033,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_sync.py" + "title": "aiplatform_v1_generated_index_endpoint_service_create_index_endpoint_sync.py" }, { "canonical": true, @@ -14064,28 +14058,24 @@ "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.mutate_deployed_index", + "fullName": 
"google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.delete_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint", "service": { "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, - "shortName": "MutateDeployedIndex" + "shortName": "DeleteIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest" }, { - "name": "index_endpoint", + "name": "name", "type": "str" }, - { - "name": "deployed_index", - "type": "google.cloud.aiplatform_v1.types.DeployedIndex" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14100,21 +14090,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "mutate_deployed_index" + "shortName": "delete_index_endpoint" }, - "description": "Sample for MutateDeployedIndex", - "file": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_async.py", + "description": "Sample for DeleteIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_MutateDeployedIndex_async", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeleteIndexEndpoint_async", "segments": [ { - "end": 60, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 55, "start": 27, "type": "SHORT" }, @@ -14124,22 +14114,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 51, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], 
- "title": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_async.py" + "title": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_async.py" }, { "canonical": true, @@ -14148,28 +14138,24 @@ "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.mutate_deployed_index", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.delete_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeleteIndexEndpoint", "service": { "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, - "shortName": "MutateDeployedIndex" + "shortName": "DeleteIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteIndexEndpointRequest" }, { - "name": "index_endpoint", + "name": "name", "type": "str" }, - { - "name": "deployed_index", - "type": "google.cloud.aiplatform_v1.types.DeployedIndex" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14184,21 +14170,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "mutate_deployed_index" + "shortName": "delete_index_endpoint" }, - "description": "Sample for MutateDeployedIndex", - "file": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_sync.py", + "description": "Sample for DeleteIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_MutateDeployedIndex_sync", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeleteIndexEndpoint_sync", "segments": [ { - 
"end": 60, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 55, "start": 27, "type": "SHORT" }, @@ -14208,22 +14194,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 51, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_sync.py" + "title": "aiplatform_v1_generated_index_endpoint_service_delete_index_endpoint_sync.py" }, { "canonical": true, @@ -14233,27 +14219,27 @@ "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.undeploy_index", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.deploy_index", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex", "service": { "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, - "shortName": "UndeployIndex" + "shortName": "DeployIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UndeployIndexRequest" + "type": "google.cloud.aiplatform_v1.types.DeployIndexRequest" }, { "name": "index_endpoint", "type": "str" }, { - "name": "deployed_index_id", - "type": "str" + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1.types.DeployedIndex" }, { "name": "retry", @@ -14269,21 +14255,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "undeploy_index" + "shortName": "deploy_index" }, - "description": "Sample for UndeployIndex", - "file": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_async.py", + "description": "Sample 
for DeployIndex", + "file": "aiplatform_v1_generated_index_endpoint_service_deploy_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_UndeployIndex_async", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeployIndex_async", "segments": [ { - "end": 56, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 60, "start": 27, "type": "SHORT" }, @@ -14293,22 +14279,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_async.py" + "title": "aiplatform_v1_generated_index_endpoint_service_deploy_index_async.py" }, { "canonical": true, @@ -14317,27 +14303,27 @@ "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.undeploy_index", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.deploy_index", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.DeployIndex", "service": { "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, - "shortName": "UndeployIndex" + "shortName": "DeployIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UndeployIndexRequest" + "type": "google.cloud.aiplatform_v1.types.DeployIndexRequest" }, { "name": "index_endpoint", "type": "str" }, { - "name": "deployed_index_id", - "type": "str" + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1.types.DeployedIndex" }, { "name": "retry", @@ -14353,21 
+14339,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "undeploy_index" + "shortName": "deploy_index" }, - "description": "Sample for UndeployIndex", - "file": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_sync.py", + "description": "Sample for DeployIndex", + "file": "aiplatform_v1_generated_index_endpoint_service_deploy_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_UndeployIndex_sync", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_DeployIndex_sync", "segments": [ { - "end": 56, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 60, "start": 27, "type": "SHORT" }, @@ -14377,22 +14363,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_sync.py" + "title": "aiplatform_v1_generated_index_endpoint_service_deploy_index_sync.py" }, { "canonical": true, @@ -14402,27 +14388,23 @@ "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.update_index_endpoint", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.get_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint", "service": { "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, - "shortName": "UpdateIndexEndpoint" + "shortName": "GetIndexEndpoint" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest" - }, - { - "name": "index_endpoint", - "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + "type": "google.cloud.aiplatform_v1.types.GetIndexEndpointRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -14438,21 +14420,21 @@ } ], "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", - "shortName": "update_index_endpoint" + "shortName": "get_index_endpoint" }, - "description": "Sample for UpdateIndexEndpoint", - "file": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_async.py", + "description": "Sample for GetIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_UpdateIndexEndpoint_async", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_GetIndexEndpoint_async", "segments": [ { - "end": 54, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 51, "start": 27, "type": "SHORT" }, @@ -14462,22 +14444,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 49, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_async.py" + "title": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_async.py" }, { "canonical": true, @@ -14486,27 +14468,23 @@ "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.update_index_endpoint", + "fullName": 
"google.cloud.aiplatform_v1.IndexEndpointServiceClient.get_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.GetIndexEndpoint", "service": { "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", "shortName": "IndexEndpointService" }, - "shortName": "UpdateIndexEndpoint" + "shortName": "GetIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1.types.GetIndexEndpointRequest" }, { - "name": "index_endpoint", - "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -14522,21 +14500,21 @@ } ], "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", - "shortName": "update_index_endpoint" + "shortName": "get_index_endpoint" }, - "description": "Sample for UpdateIndexEndpoint", - "file": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_sync.py", + "description": "Sample for GetIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexEndpointService_UpdateIndexEndpoint_sync", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_GetIndexEndpoint_sync", "segments": [ { - "end": 54, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 51, "start": 27, "type": "SHORT" }, @@ -14546,53 +14524,49 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 49, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_sync.py" + "title": "aiplatform_v1_generated_index_endpoint_service_get_index_endpoint_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", - "shortName": "IndexServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.create_index", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.list_index_endpoints", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.CreateIndex", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "CreateIndex" + "shortName": "ListIndexEndpoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateIndexRequest" + "type": "google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "index", - "type": "google.cloud.aiplatform_v1.types.Index" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14606,22 +14580,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_index" + "resultType": "google.cloud.aiplatform_v1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager", + "shortName": "list_index_endpoints" }, - "description": "Sample for CreateIndex", - "file": "aiplatform_v1_generated_index_service_create_index_async.py", + "description": "Sample for ListIndexEndpoints", + "file": 
"aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_CreateIndex_async", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_ListIndexEndpoints_async", "segments": [ { - "end": 59, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 52, "start": 27, "type": "SHORT" }, @@ -14631,52 +14605,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_create_index_async.py" + "title": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", - "shortName": "IndexServiceClient" + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.create_index", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.list_index_endpoints", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.CreateIndex", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.ListIndexEndpoints", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "CreateIndex" + "shortName": "ListIndexEndpoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateIndexRequest" + "type": "google.cloud.aiplatform_v1.types.ListIndexEndpointsRequest" }, { "name": "parent", "type": "str" }, - { 
- "name": "index", - "type": "google.cloud.aiplatform_v1.types.Index" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14690,22 +14660,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_index" + "resultType": "google.cloud.aiplatform_v1.services.index_endpoint_service.pagers.ListIndexEndpointsPager", + "shortName": "list_index_endpoints" }, - "description": "Sample for CreateIndex", - "file": "aiplatform_v1_generated_index_service_create_index_sync.py", + "description": "Sample for ListIndexEndpoints", + "file": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_CreateIndex_sync", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_ListIndexEndpoints_sync", "segments": [ { - "end": 59, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 52, "start": 27, "type": "SHORT" }, @@ -14715,49 +14685,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_create_index_sync.py" + "title": "aiplatform_v1_generated_index_endpoint_service_list_index_endpoints_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", - "shortName": "IndexServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.delete_index", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.mutate_deployed_index", 
"method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.DeleteIndex", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "DeleteIndex" + "shortName": "MutateDeployedIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteIndexRequest" + "type": "google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest" }, { - "name": "name", + "name": "index_endpoint", "type": "str" }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1.types.DeployedIndex" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14772,21 +14746,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_index" + "shortName": "mutate_deployed_index" }, - "description": "Sample for DeleteIndex", - "file": "aiplatform_v1_generated_index_service_delete_index_async.py", + "description": "Sample for MutateDeployedIndex", + "file": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_DeleteIndex_async", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_MutateDeployedIndex_async", "segments": [ { - "end": 55, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 60, "start": 27, "type": "SHORT" }, @@ -14796,48 +14770,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_delete_index_async.py" + 
"title": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", - "shortName": "IndexServiceClient" + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.delete_index", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.mutate_deployed_index", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.DeleteIndex", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.MutateDeployedIndex", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "DeleteIndex" + "shortName": "MutateDeployedIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteIndexRequest" + "type": "google.cloud.aiplatform_v1.types.MutateDeployedIndexRequest" }, { - "name": "name", + "name": "index_endpoint", "type": "str" }, + { + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1.types.DeployedIndex" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14852,21 +14830,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_index" + "shortName": "mutate_deployed_index" }, - "description": "Sample for DeleteIndex", - "file": "aiplatform_v1_generated_index_service_delete_index_sync.py", + "description": "Sample for MutateDeployedIndex", + "file": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_DeleteIndex_sync", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_MutateDeployedIndex_sync", "segments": [ 
{ - "end": 55, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 60, "start": 27, "type": "SHORT" }, @@ -14876,47 +14854,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_delete_index_sync.py" + "title": "aiplatform_v1_generated_index_endpoint_service_mutate_deployed_index_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", - "shortName": "IndexServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.get_index", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.undeploy_index", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.GetIndex", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "GetIndex" + "shortName": "UndeployIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetIndexRequest" + "type": "google.cloud.aiplatform_v1.types.UndeployIndexRequest" }, { - "name": "name", + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index_id", "type": "str" }, { @@ -14932,22 +14914,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Index", - "shortName": "get_index" + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "undeploy_index" }, - "description": "Sample for GetIndex", - "file": "aiplatform_v1_generated_index_service_get_index_async.py", + "description": "Sample for UndeployIndex", + "file": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_GetIndex_async", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_UndeployIndex_async", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -14957,46 +14939,50 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_get_index_async.py" + "title": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", - "shortName": "IndexServiceClient" + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.get_index", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.undeploy_index", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.GetIndex", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UndeployIndex", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "GetIndex" + "shortName": "UndeployIndex" }, "parameters": 
[ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetIndexRequest" + "type": "google.cloud.aiplatform_v1.types.UndeployIndexRequest" }, { - "name": "name", + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index_id", "type": "str" }, { @@ -15012,22 +14998,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Index", - "shortName": "get_index" + "resultType": "google.api_core.operation.Operation", + "shortName": "undeploy_index" }, - "description": "Sample for GetIndex", - "file": "aiplatform_v1_generated_index_service_get_index_sync.py", + "description": "Sample for UndeployIndex", + "file": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_GetIndex_sync", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_UndeployIndex_sync", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -15037,48 +15023,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_get_index_sync.py" + "title": "aiplatform_v1_generated_index_endpoint_service_undeploy_index_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", - "shortName": "IndexServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.list_indexes", + "fullName": 
"google.cloud.aiplatform_v1.IndexEndpointServiceAsyncClient.update_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.ListIndexes", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "ListIndexes" + "shortName": "UpdateIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListIndexesRequest" + "type": "google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest" }, { - "name": "parent", - "type": "str" + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -15093,22 +15083,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.index_service.pagers.ListIndexesAsyncPager", - "shortName": "list_indexes" + "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", + "shortName": "update_index_endpoint" }, - "description": "Sample for ListIndexes", - "file": "aiplatform_v1_generated_index_service_list_indexes_async.py", + "description": "Sample for UpdateIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_ListIndexes_async", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_UpdateIndexEndpoint_async", "segments": [ { - "end": 52, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 54, "start": 27, "type": "SHORT" }, @@ -15118,47 +15108,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - 
"start": 46, + "end": 51, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_list_indexes_async.py" + "title": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", - "shortName": "IndexServiceClient" + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.list_indexes", + "fullName": "google.cloud.aiplatform_v1.IndexEndpointServiceClient.update_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.ListIndexes", + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService.UpdateIndexEndpoint", "service": { - "fullName": "google.cloud.aiplatform.v1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "ListIndexes" + "shortName": "UpdateIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListIndexesRequest" + "type": "google.cloud.aiplatform_v1.types.UpdateIndexEndpointRequest" }, { - "name": "parent", - "type": "str" + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1.types.IndexEndpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -15173,22 +15167,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.index_service.pagers.ListIndexesPager", - "shortName": "list_indexes" + "resultType": "google.cloud.aiplatform_v1.types.IndexEndpoint", + "shortName": "update_index_endpoint" }, - "description": "Sample for ListIndexes", - "file": 
"aiplatform_v1_generated_index_service_list_indexes_sync.py", + "description": "Sample for UpdateIndexEndpoint", + "file": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_ListIndexes_sync", + "regionTag": "aiplatform_v1_generated_IndexEndpointService_UpdateIndexEndpoint_sync", "segments": [ { - "end": 52, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 54, "start": 27, "type": "SHORT" }, @@ -15198,22 +15192,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 51, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_list_indexes_sync.py" + "title": "aiplatform_v1_generated_index_endpoint_service_update_index_endpoint_sync.py" }, { "canonical": true, @@ -15223,19 +15217,27 @@ "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", "shortName": "IndexServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.remove_datapoints", + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.create_index", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.RemoveDatapoints", + "fullName": "google.cloud.aiplatform.v1.IndexService.CreateIndex", "service": { "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, - "shortName": "RemoveDatapoints" + "shortName": "CreateIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.RemoveDatapointsRequest" + "type": "google.cloud.aiplatform_v1.types.CreateIndexRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1.types.Index" }, { "name": "retry", @@ -15250,22 
+15252,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.RemoveDatapointsResponse", - "shortName": "remove_datapoints" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_index" }, - "description": "Sample for RemoveDatapoints", - "file": "aiplatform_v1_generated_index_service_remove_datapoints_async.py", + "description": "Sample for CreateIndex", + "file": "aiplatform_v1_generated_index_service_create_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_RemoveDatapoints_async", + "regionTag": "aiplatform_v1_generated_IndexService_CreateIndex_async", "segments": [ { - "end": 51, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 59, "start": 27, "type": "SHORT" }, @@ -15275,22 +15277,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_remove_datapoints_async.py" + "title": "aiplatform_v1_generated_index_service_create_index_async.py" }, { "canonical": true, @@ -15299,19 +15301,27 @@ "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", "shortName": "IndexServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.remove_datapoints", + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.create_index", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.RemoveDatapoints", + "fullName": "google.cloud.aiplatform.v1.IndexService.CreateIndex", "service": { "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, - "shortName": "RemoveDatapoints" + "shortName": "CreateIndex" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.RemoveDatapointsRequest" + "type": "google.cloud.aiplatform_v1.types.CreateIndexRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "index", + "type": "google.cloud.aiplatform_v1.types.Index" }, { "name": "retry", @@ -15326,22 +15336,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.RemoveDatapointsResponse", - "shortName": "remove_datapoints" + "resultType": "google.api_core.operation.Operation", + "shortName": "create_index" }, - "description": "Sample for RemoveDatapoints", - "file": "aiplatform_v1_generated_index_service_remove_datapoints_sync.py", + "description": "Sample for CreateIndex", + "file": "aiplatform_v1_generated_index_service_create_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_RemoveDatapoints_sync", + "regionTag": "aiplatform_v1_generated_IndexService_CreateIndex_sync", "segments": [ { - "end": 51, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 59, "start": 27, "type": "SHORT" }, @@ -15351,22 +15361,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_remove_datapoints_sync.py" + "title": "aiplatform_v1_generated_index_service_create_index_sync.py" }, { "canonical": true, @@ -15376,27 +15386,23 @@ "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", "shortName": "IndexServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.update_index", + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.delete_index", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.UpdateIndex", + "fullName": 
"google.cloud.aiplatform.v1.IndexService.DeleteIndex", "service": { "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, - "shortName": "UpdateIndex" + "shortName": "DeleteIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateIndexRequest" - }, - { - "name": "index", - "type": "google.cloud.aiplatform_v1.types.Index" + "type": "google.cloud.aiplatform_v1.types.DeleteIndexRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -15412,21 +15418,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_index" + "shortName": "delete_index" }, - "description": "Sample for UpdateIndex", - "file": "aiplatform_v1_generated_index_service_update_index_async.py", + "description": "Sample for DeleteIndex", + "file": "aiplatform_v1_generated_index_service_delete_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_UpdateIndex_async", + "regionTag": "aiplatform_v1_generated_IndexService_DeleteIndex_async", "segments": [ { - "end": 58, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 55, "start": 27, "type": "SHORT" }, @@ -15436,22 +15442,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 49, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_update_index_async.py" + "title": "aiplatform_v1_generated_index_service_delete_index_async.py" }, { "canonical": true, @@ -15460,27 +15466,23 @@ "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", "shortName": "IndexServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1.IndexServiceClient.update_index", + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.delete_index", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.UpdateIndex", + "fullName": "google.cloud.aiplatform.v1.IndexService.DeleteIndex", "service": { "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, - "shortName": "UpdateIndex" + "shortName": "DeleteIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateIndexRequest" - }, - { - "name": "index", - "type": "google.cloud.aiplatform_v1.types.Index" + "type": "google.cloud.aiplatform_v1.types.DeleteIndexRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -15496,21 +15498,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "update_index" + "shortName": "delete_index" }, - "description": "Sample for UpdateIndex", - "file": "aiplatform_v1_generated_index_service_update_index_sync.py", + "description": "Sample for DeleteIndex", + "file": "aiplatform_v1_generated_index_service_delete_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_UpdateIndex_sync", + "regionTag": "aiplatform_v1_generated_IndexService_DeleteIndex_sync", "segments": [ { - "end": 58, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 55, "start": 27, "type": "SHORT" }, @@ -15520,22 +15522,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 49, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_update_index_sync.py" + "title": "aiplatform_v1_generated_index_service_delete_index_sync.py" }, 
{ "canonical": true, @@ -15545,19 +15547,23 @@ "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", "shortName": "IndexServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.upsert_datapoints", + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.get_index", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.UpsertDatapoints", + "fullName": "google.cloud.aiplatform.v1.IndexService.GetIndex", "service": { "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, - "shortName": "UpsertDatapoints" + "shortName": "GetIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpsertDatapointsRequest" + "type": "google.cloud.aiplatform_v1.types.GetIndexRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -15572,14 +15578,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.UpsertDatapointsResponse", - "shortName": "upsert_datapoints" + "resultType": "google.cloud.aiplatform_v1.types.Index", + "shortName": "get_index" }, - "description": "Sample for UpsertDatapoints", - "file": "aiplatform_v1_generated_index_service_upsert_datapoints_async.py", + "description": "Sample for GetIndex", + "file": "aiplatform_v1_generated_index_service_get_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_UpsertDatapoints_async", + "regionTag": "aiplatform_v1_generated_IndexService_GetIndex_async", "segments": [ { "end": 51, @@ -15612,7 +15618,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_upsert_datapoints_async.py" + "title": "aiplatform_v1_generated_index_service_get_index_async.py" }, { "canonical": true, @@ -15621,19 +15627,23 @@ "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", "shortName": "IndexServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1.IndexServiceClient.upsert_datapoints", + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.get_index", "method": { - "fullName": "google.cloud.aiplatform.v1.IndexService.UpsertDatapoints", + "fullName": "google.cloud.aiplatform.v1.IndexService.GetIndex", "service": { "fullName": "google.cloud.aiplatform.v1.IndexService", "shortName": "IndexService" }, - "shortName": "UpsertDatapoints" + "shortName": "GetIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpsertDatapointsRequest" + "type": "google.cloud.aiplatform_v1.types.GetIndexRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -15648,14 +15658,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.UpsertDatapointsResponse", - "shortName": "upsert_datapoints" + "resultType": "google.cloud.aiplatform_v1.types.Index", + "shortName": "get_index" }, - "description": "Sample for UpsertDatapoints", - "file": "aiplatform_v1_generated_index_service_upsert_datapoints_sync.py", + "description": "Sample for GetIndex", + "file": "aiplatform_v1_generated_index_service_get_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_IndexService_UpsertDatapoints_sync", + "regionTag": "aiplatform_v1_generated_IndexService_GetIndex_sync", "segments": [ { "end": 51, @@ -15688,32 +15698,32 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_index_service_upsert_datapoints_sync.py" + "title": "aiplatform_v1_generated_index_service_get_index_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_batch_prediction_job", + 
"fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.list_indexes", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1.IndexService.ListIndexes", "service": { - "fullName": "google.cloud.aiplatform.v1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" }, - "shortName": "CancelBatchPredictionJob" + "shortName": "ListIndexes" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1.types.ListIndexesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -15729,21 +15739,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_batch_prediction_job" + "resultType": "google.cloud.aiplatform_v1.services.index_service.pagers.ListIndexesAsyncPager", + "shortName": "list_indexes" }, - "description": "Sample for CancelBatchPredictionJob", - "file": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_async.py", + "description": "Sample for ListIndexes", + "file": "aiplatform_v1_generated_index_service_list_indexes_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CancelBatchPredictionJob_async", + "regionTag": "aiplatform_v1_generated_IndexService_ListIndexes_async", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -15758,39 +15769,41 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_async.py" + "title": "aiplatform_v1_generated_index_service_list_indexes_async.py" }, { "canonical": true, "clientMethod": { "client": { - 
"fullName": "google.cloud.aiplatform_v1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.list_indexes", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1.IndexService.ListIndexes", "service": { - "fullName": "google.cloud.aiplatform.v1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" }, - "shortName": "CancelBatchPredictionJob" + "shortName": "ListIndexes" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1.types.ListIndexesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -15806,21 +15819,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_batch_prediction_job" + "resultType": "google.cloud.aiplatform_v1.services.index_service.pagers.ListIndexesPager", + "shortName": "list_indexes" }, - "description": "Sample for CancelBatchPredictionJob", - "file": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_sync.py", + "description": "Sample for ListIndexes", + "file": "aiplatform_v1_generated_index_service_list_indexes_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CancelBatchPredictionJob_sync", + "regionTag": "aiplatform_v1_generated_IndexService_ListIndexes_sync", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -15835,41 +15849,39 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 
53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_sync.py" + "title": "aiplatform_v1_generated_index_service_list_indexes_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_custom_job", + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.remove_datapoints", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CancelCustomJob", + "fullName": "google.cloud.aiplatform.v1.IndexService.RemoveDatapoints", "service": { - "fullName": "google.cloud.aiplatform.v1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" }, - "shortName": "CancelCustomJob" + "shortName": "RemoveDatapoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CancelCustomJobRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1.types.RemoveDatapointsRequest" }, { "name": "retry", @@ -15884,21 +15896,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_custom_job" + "resultType": "google.cloud.aiplatform_v1.types.RemoveDatapointsResponse", + "shortName": "remove_datapoints" }, - "description": "Sample for CancelCustomJob", - "file": "aiplatform_v1_generated_job_service_cancel_custom_job_async.py", + "description": "Sample for RemoveDatapoints", + "file": "aiplatform_v1_generated_index_service_remove_datapoints_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CancelCustomJob_async", + "regionTag": "aiplatform_v1_generated_IndexService_RemoveDatapoints_async", "segments": [ 
{ - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -15913,40 +15926,38 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_cancel_custom_job_async.py" + "title": "aiplatform_v1_generated_index_service_remove_datapoints_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_custom_job", + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.remove_datapoints", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CancelCustomJob", + "fullName": "google.cloud.aiplatform.v1.IndexService.RemoveDatapoints", "service": { - "fullName": "google.cloud.aiplatform.v1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" }, - "shortName": "CancelCustomJob" + "shortName": "RemoveDatapoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CancelCustomJobRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1.types.RemoveDatapointsRequest" }, { "name": "retry", @@ -15961,21 +15972,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_custom_job" + "resultType": "google.cloud.aiplatform_v1.types.RemoveDatapointsResponse", + "shortName": "remove_datapoints" }, - "description": "Sample for CancelCustomJob", - "file": "aiplatform_v1_generated_job_service_cancel_custom_job_sync.py", + "description": "Sample for RemoveDatapoints", + "file": "aiplatform_v1_generated_index_service_remove_datapoints_sync.py", 
"language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CancelCustomJob_sync", + "regionTag": "aiplatform_v1_generated_IndexService_RemoveDatapoints_sync", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -15990,41 +16002,47 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_cancel_custom_job_sync.py" + "title": "aiplatform_v1_generated_index_service_remove_datapoints_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.update_index", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1.IndexService.UpdateIndex", "service": { - "fullName": "google.cloud.aiplatform.v1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" }, - "shortName": "CancelDataLabelingJob" + "shortName": "UpdateIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1.types.UpdateIndexRequest" }, { - "name": "name", - "type": "str" + "name": "index", + "type": "google.cloud.aiplatform_v1.types.Index" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -16039,21 +16057,22 @@ "type": 
"Sequence[Tuple[str, str]" } ], - "shortName": "cancel_data_labeling_job" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_index" }, - "description": "Sample for CancelDataLabelingJob", - "file": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_async.py", + "description": "Sample for UpdateIndex", + "file": "aiplatform_v1_generated_index_service_update_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CancelDataLabelingJob_async", + "regionTag": "aiplatform_v1_generated_IndexService_UpdateIndex_async", "segments": [ { - "end": 49, + "end": 58, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 58, "start": 27, "type": "SHORT" }, @@ -16063,45 +16082,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "start": 46, + "end": 55, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 59, + "start": 56, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_async.py" + "title": "aiplatform_v1_generated_index_service_update_index_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.update_index", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1.IndexService.UpdateIndex", "service": { - "fullName": "google.cloud.aiplatform.v1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" }, - "shortName": 
"CancelDataLabelingJob" + "shortName": "UpdateIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1.types.UpdateIndexRequest" }, { - "name": "name", - "type": "str" + "name": "index", + "type": "google.cloud.aiplatform_v1.types.Index" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -16116,21 +16141,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_data_labeling_job" + "resultType": "google.api_core.operation.Operation", + "shortName": "update_index" }, - "description": "Sample for CancelDataLabelingJob", - "file": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_sync.py", + "description": "Sample for UpdateIndex", + "file": "aiplatform_v1_generated_index_service_update_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CancelDataLabelingJob_sync", + "regionTag": "aiplatform_v1_generated_IndexService_UpdateIndex_sync", "segments": [ { - "end": 49, + "end": 58, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 58, "start": 27, "type": "SHORT" }, @@ -16140,46 +16166,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "start": 46, + "end": 55, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 59, + "start": 56, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_sync.py" + "title": "aiplatform_v1_generated_index_service_update_index_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1.IndexServiceAsyncClient.upsert_datapoints", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1.IndexService.UpsertDatapoints", "service": { - "fullName": "google.cloud.aiplatform.v1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" }, - "shortName": "CancelHyperparameterTuningJob" + "shortName": "UpsertDatapoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1.types.UpsertDatapointsRequest" }, { "name": "retry", @@ -16194,21 +16218,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1.types.UpsertDatapointsResponse", + "shortName": "upsert_datapoints" }, - "description": "Sample for CancelHyperparameterTuningJob", - "file": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_async.py", + "description": "Sample for UpsertDatapoints", + "file": "aiplatform_v1_generated_index_service_upsert_datapoints_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CancelHyperparameterTuningJob_async", + "regionTag": "aiplatform_v1_generated_IndexService_UpsertDatapoints_async", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -16223,40 +16248,38 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_async.py" + "title": "aiplatform_v1_generated_index_service_upsert_datapoints_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient", + "shortName": "IndexServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1.IndexServiceClient.upsert_datapoints", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1.IndexService.UpsertDatapoints", "service": { - "fullName": "google.cloud.aiplatform.v1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1.IndexService", + "shortName": "IndexService" }, - "shortName": "CancelHyperparameterTuningJob" + "shortName": "UpsertDatapoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1.types.UpsertDatapointsRequest" }, { "name": "retry", @@ -16271,21 +16294,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1.types.UpsertDatapointsResponse", + "shortName": "upsert_datapoints" }, - "description": "Sample for CancelHyperparameterTuningJob", - "file": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py", + "description": "Sample for UpsertDatapoints", + "file": "aiplatform_v1_generated_index_service_upsert_datapoints_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CancelHyperparameterTuningJob_sync", + "regionTag": "aiplatform_v1_generated_IndexService_UpsertDatapoints_sync", 
"segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -16300,15 +16324,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py" + "title": "aiplatform_v1_generated_index_service_upsert_datapoints_sync.py" }, { "canonical": true, @@ -16318,19 +16344,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_nas_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CancelNasJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CancelNasJob" + "shortName": "CancelBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CancelNasJobRequest" + "type": "google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest" }, { "name": "name", @@ -16349,13 +16375,13 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_nas_job" + "shortName": "cancel_batch_prediction_job" }, - "description": "Sample for CancelNasJob", - "file": "aiplatform_v1_generated_job_service_cancel_nas_job_async.py", + "description": "Sample for CancelBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CancelNasJob_async", + "regionTag": "aiplatform_v1_generated_JobService_CancelBatchPredictionJob_async", "segments": [ { "end": 49, @@ 
-16386,7 +16412,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_cancel_nas_job_async.py" + "title": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_async.py" }, { "canonical": true, @@ -16395,19 +16421,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_nas_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CancelNasJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CancelNasJob" + "shortName": "CancelBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CancelNasJobRequest" + "type": "google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest" }, { "name": "name", @@ -16426,13 +16452,13 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_nas_job" + "shortName": "cancel_batch_prediction_job" }, - "description": "Sample for CancelNasJob", - "file": "aiplatform_v1_generated_job_service_cancel_nas_job_sync.py", + "description": "Sample for CancelBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CancelNasJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_CancelBatchPredictionJob_sync", "segments": [ { "end": 49, @@ -16463,7 +16489,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_cancel_nas_job_sync.py" + "title": "aiplatform_v1_generated_job_service_cancel_batch_prediction_job_sync.py" }, { "canonical": true, @@ -16473,28 +16499,24 @@ "fullName": 
"google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CancelCustomJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CreateBatchPredictionJob" + "shortName": "CancelCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1.types.CancelCustomJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "batch_prediction_job", - "type": "google.cloud.aiplatform_v1.types.BatchPredictionJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -16508,22 +16530,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", - "shortName": "create_batch_prediction_job" + "shortName": "cancel_custom_job" }, - "description": "Sample for CreateBatchPredictionJob", - "file": "aiplatform_v1_generated_job_service_create_batch_prediction_job_async.py", + "description": "Sample for CancelCustomJob", + "file": "aiplatform_v1_generated_job_service_cancel_custom_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CreateBatchPredictionJob_async", + "regionTag": "aiplatform_v1_generated_JobService_CancelCustomJob_async", "segments": [ { - "end": 59, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 49, "start": 27, "type": "SHORT" }, @@ -16533,22 +16554,20 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 53, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - 
"start": 54, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_create_batch_prediction_job_async.py" + "title": "aiplatform_v1_generated_job_service_cancel_custom_job_async.py" }, { "canonical": true, @@ -16557,28 +16576,24 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CancelCustomJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CreateBatchPredictionJob" + "shortName": "CancelCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1.types.CancelCustomJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "batch_prediction_job", - "type": "google.cloud.aiplatform_v1.types.BatchPredictionJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -16592,22 +16607,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", - "shortName": "create_batch_prediction_job" + "shortName": "cancel_custom_job" }, - "description": "Sample for CreateBatchPredictionJob", - "file": "aiplatform_v1_generated_job_service_create_batch_prediction_job_sync.py", + "description": "Sample for CancelCustomJob", + "file": "aiplatform_v1_generated_job_service_cancel_custom_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CreateBatchPredictionJob_sync", + "regionTag": 
"aiplatform_v1_generated_JobService_CancelCustomJob_sync", "segments": [ { - "end": 59, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 49, "start": 27, "type": "SHORT" }, @@ -16617,22 +16631,20 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 53, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 54, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_create_batch_prediction_job_sync.py" + "title": "aiplatform_v1_generated_job_service_cancel_custom_job_sync.py" }, { "canonical": true, @@ -16642,28 +16654,24 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_custom_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CreateCustomJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CreateCustomJob" + "shortName": "CancelDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateCustomJobRequest" + "type": "google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "custom_job", - "type": "google.cloud.aiplatform_v1.types.CustomJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -16677,22 +16685,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.CustomJob", - "shortName": "create_custom_job" + "shortName": "cancel_data_labeling_job" }, - "description": "Sample for CreateCustomJob", - "file": 
"aiplatform_v1_generated_job_service_create_custom_job_async.py", + "description": "Sample for CancelDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CreateCustomJob_async", + "regionTag": "aiplatform_v1_generated_JobService_CancelDataLabelingJob_async", "segments": [ { - "end": 56, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 49, "start": 27, "type": "SHORT" }, @@ -16702,22 +16709,20 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_create_custom_job_async.py" + "title": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_async.py" }, { "canonical": true, @@ -16726,28 +16731,24 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_custom_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CreateCustomJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CreateCustomJob" + "shortName": "CancelDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateCustomJobRequest" + "type": "google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "custom_job", - "type": "google.cloud.aiplatform_v1.types.CustomJob" - }, { "name": "retry", "type": 
"google.api_core.retry.Retry" @@ -16761,22 +16762,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.CustomJob", - "shortName": "create_custom_job" + "shortName": "cancel_data_labeling_job" }, - "description": "Sample for CreateCustomJob", - "file": "aiplatform_v1_generated_job_service_create_custom_job_sync.py", + "description": "Sample for CancelDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CreateCustomJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_CancelDataLabelingJob_sync", "segments": [ { - "end": 56, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 49, "start": 27, "type": "SHORT" }, @@ -16786,22 +16786,20 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_create_custom_job_sync.py" + "title": "aiplatform_v1_generated_job_service_cancel_data_labeling_job_sync.py" }, { "canonical": true, @@ -16811,28 +16809,24 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CreateDataLabelingJob" + "shortName": "CancelHyperparameterTuningJob" }, "parameters": [ { 
"name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "data_labeling_job", - "type": "google.cloud.aiplatform_v1.types.DataLabelingJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -16846,22 +16840,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", - "shortName": "create_data_labeling_job" + "shortName": "cancel_hyperparameter_tuning_job" }, - "description": "Sample for CreateDataLabelingJob", - "file": "aiplatform_v1_generated_job_service_create_data_labeling_job_async.py", + "description": "Sample for CancelHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CreateDataLabelingJob_async", + "regionTag": "aiplatform_v1_generated_JobService_CancelHyperparameterTuningJob_async", "segments": [ { - "end": 60, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 49, "start": 27, "type": "SHORT" }, @@ -16871,22 +16864,20 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 55, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_create_data_labeling_job_async.py" + "title": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_async.py" }, { "canonical": true, @@ -16895,28 +16886,24 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_data_labeling_job", + "fullName": 
"google.cloud.aiplatform_v1.JobServiceClient.cancel_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CreateDataLabelingJob" + "shortName": "CancelHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "data_labeling_job", - "type": "google.cloud.aiplatform_v1.types.DataLabelingJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -16930,22 +16917,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", - "shortName": "create_data_labeling_job" + "shortName": "cancel_hyperparameter_tuning_job" }, - "description": "Sample for CreateDataLabelingJob", - "file": "aiplatform_v1_generated_job_service_create_data_labeling_job_sync.py", + "description": "Sample for CancelHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CreateDataLabelingJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_CancelHyperparameterTuningJob_sync", "segments": [ { - "end": 60, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 49, "start": 27, "type": "SHORT" }, @@ -16955,22 +16941,20 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 55, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 50, "type": "RESPONSE_HANDLING" 
} ], - "title": "aiplatform_v1_generated_job_service_create_data_labeling_job_sync.py" + "title": "aiplatform_v1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py" }, { "canonical": true, @@ -16980,28 +16964,24 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.cancel_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CancelNasJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CreateHyperparameterTuningJob" + "shortName": "CancelNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1.types.CancelNasJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "hyperparameter_tuning_job", - "type": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17015,22 +16995,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", - "shortName": "create_hyperparameter_tuning_job" + "shortName": "cancel_nas_job" }, - "description": "Sample for CreateHyperparameterTuningJob", - "file": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_async.py", + "description": "Sample for CancelNasJob", + "file": "aiplatform_v1_generated_job_service_cancel_nas_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_async", + "regionTag": "aiplatform_v1_generated_JobService_CancelNasJob_async", "segments": [ { - 
"end": 63, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 63, + "end": 49, "start": 27, "type": "SHORT" }, @@ -17040,22 +17019,20 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 57, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 60, - "start": 58, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 64, - "start": 61, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_async.py" + "title": "aiplatform_v1_generated_job_service_cancel_nas_job_async.py" }, { "canonical": true, @@ -17064,28 +17041,24 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.cancel_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CancelNasJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CreateHyperparameterTuningJob" + "shortName": "CancelNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1.types.CancelNasJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "hyperparameter_tuning_job", - "type": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17099,22 +17072,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", - "shortName": "create_hyperparameter_tuning_job" + "shortName": "cancel_nas_job" }, - "description": "Sample for CreateHyperparameterTuningJob", - "file": 
"aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py", + "description": "Sample for CancelNasJob", + "file": "aiplatform_v1_generated_job_service_cancel_nas_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_CancelNasJob_sync", "segments": [ { - "end": 63, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 63, + "end": 49, "start": 27, "type": "SHORT" }, @@ -17124,22 +17096,20 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 57, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 60, - "start": 58, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 64, - "start": 61, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py" + "title": "aiplatform_v1_generated_job_service_cancel_nas_job_sync.py" }, { "canonical": true, @@ -17149,27 +17119,27 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CreateModelDeploymentMonitoringJob" + "shortName": "CreateBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest" }, { "name": "parent", "type": "str" }, { - "name": 
"model_deployment_monitoring_job", - "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + "name": "batch_prediction_job", + "type": "google.cloud.aiplatform_v1.types.BatchPredictionJob" }, { "name": "retry", @@ -17184,22 +17154,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", - "shortName": "create_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "create_batch_prediction_job" }, - "description": "Sample for CreateModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_async.py", + "description": "Sample for CreateBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_create_batch_prediction_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CreateModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1_generated_JobService_CreateBatchPredictionJob_async", "segments": [ { - "end": 56, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 59, "start": 27, "type": "SHORT" }, @@ -17209,22 +17179,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 53, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 56, + "start": 54, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1_generated_job_service_create_batch_prediction_job_async.py" }, { "canonical": true, @@ -17233,27 +17203,27 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_model_deployment_monitoring_job", + "fullName": 
"google.cloud.aiplatform_v1.JobServiceClient.create_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CreateModelDeploymentMonitoringJob" + "shortName": "CreateBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest" }, { "name": "parent", "type": "str" }, { - "name": "model_deployment_monitoring_job", - "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + "name": "batch_prediction_job", + "type": "google.cloud.aiplatform_v1.types.BatchPredictionJob" }, { "name": "retry", @@ -17268,22 +17238,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", - "shortName": "create_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "create_batch_prediction_job" }, - "description": "Sample for CreateModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_sync.py", + "description": "Sample for CreateBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_create_batch_prediction_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CreateModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_CreateBatchPredictionJob_sync", "segments": [ { - "end": 56, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 59, "start": 27, "type": "SHORT" }, @@ -17293,22 +17263,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 
53, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 56, + "start": 54, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1_generated_job_service_create_batch_prediction_job_sync.py" }, { "canonical": true, @@ -17318,27 +17288,27 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_nas_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CreateNasJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CreateCustomJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CreateNasJob" + "shortName": "CreateCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateNasJobRequest" + "type": "google.cloud.aiplatform_v1.types.CreateCustomJobRequest" }, { "name": "parent", "type": "str" }, { - "name": "nas_job", - "type": "google.cloud.aiplatform_v1.types.NasJob" + "name": "custom_job", + "type": "google.cloud.aiplatform_v1.types.CustomJob" }, { "name": "retry", @@ -17353,22 +17323,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.NasJob", - "shortName": "create_nas_job" + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "create_custom_job" }, - "description": "Sample for CreateNasJob", - "file": "aiplatform_v1_generated_job_service_create_nas_job_async.py", + "description": "Sample for CreateCustomJob", + "file": "aiplatform_v1_generated_job_service_create_custom_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_JobService_CreateNasJob_async", + "regionTag": "aiplatform_v1_generated_JobService_CreateCustomJob_async", "segments": [ { - "end": 58, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 56, "start": 27, "type": "SHORT" }, @@ -17378,22 +17348,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 52, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 53, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_create_nas_job_async.py" + "title": "aiplatform_v1_generated_job_service_create_custom_job_async.py" }, { "canonical": true, @@ -17402,27 +17372,27 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_nas_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.CreateNasJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CreateCustomJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "CreateNasJob" + "shortName": "CreateCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateNasJobRequest" + "type": "google.cloud.aiplatform_v1.types.CreateCustomJobRequest" }, { "name": "parent", "type": "str" }, { - "name": "nas_job", - "type": "google.cloud.aiplatform_v1.types.NasJob" + "name": "custom_job", + "type": "google.cloud.aiplatform_v1.types.CustomJob" }, { "name": "retry", @@ -17437,22 +17407,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.NasJob", - "shortName": "create_nas_job" + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "create_custom_job" }, - 
"description": "Sample for CreateNasJob", - "file": "aiplatform_v1_generated_job_service_create_nas_job_sync.py", + "description": "Sample for CreateCustomJob", + "file": "aiplatform_v1_generated_job_service_create_custom_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_CreateNasJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_CreateCustomJob_sync", "segments": [ { - "end": 58, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 56, "start": 27, "type": "SHORT" }, @@ -17462,22 +17432,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 52, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 53, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_create_nas_job_sync.py" + "title": "aiplatform_v1_generated_job_service_create_custom_job_sync.py" }, { "canonical": true, @@ -17487,24 +17457,28 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteBatchPredictionJob" + "shortName": "CreateDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "data_labeling_job", 
+ "type": "google.cloud.aiplatform_v1.types.DataLabelingJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17518,22 +17492,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_batch_prediction_job" + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "create_data_labeling_job" }, - "description": "Sample for DeleteBatchPredictionJob", - "file": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_async.py", + "description": "Sample for CreateDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_create_data_labeling_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteBatchPredictionJob_async", + "regionTag": "aiplatform_v1_generated_JobService_CreateDataLabelingJob_async", "segments": [ { - "end": 55, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 60, "start": 27, "type": "SHORT" }, @@ -17543,22 +17517,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 57, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_async.py" + "title": "aiplatform_v1_generated_job_service_create_data_labeling_job_async.py" }, { "canonical": true, @@ -17567,24 +17541,28 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob", + "fullName": 
"google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteBatchPredictionJob" + "shortName": "CreateDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "data_labeling_job", + "type": "google.cloud.aiplatform_v1.types.DataLabelingJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17598,22 +17576,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_batch_prediction_job" + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "create_data_labeling_job" }, - "description": "Sample for DeleteBatchPredictionJob", - "file": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_sync.py", + "description": "Sample for CreateDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_create_data_labeling_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteBatchPredictionJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_CreateDataLabelingJob_sync", "segments": [ { - "end": 55, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 60, "start": 27, "type": "SHORT" }, @@ -17623,22 +17601,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 57, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_sync.py" + "title": 
"aiplatform_v1_generated_job_service_create_data_labeling_job_sync.py" }, { "canonical": true, @@ -17648,24 +17626,28 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_custom_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteCustomJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteCustomJob" + "shortName": "CreateHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteCustomJobRequest" + "type": "google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "hyperparameter_tuning_job", + "type": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17679,22 +17661,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_custom_job" + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "create_hyperparameter_tuning_job" }, - "description": "Sample for DeleteCustomJob", - "file": "aiplatform_v1_generated_job_service_delete_custom_job_async.py", + "description": "Sample for CreateHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteCustomJob_async", + "regionTag": "aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_async", "segments": [ { - "end": 55, + "end": 
63, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 63, "start": 27, "type": "SHORT" }, @@ -17704,22 +17686,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 57, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 60, + "start": 58, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 64, + "start": 61, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_custom_job_async.py" + "title": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_async.py" }, { "canonical": true, @@ -17728,24 +17710,28 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_custom_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteCustomJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteCustomJob" + "shortName": "CreateHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteCustomJobRequest" + "type": "google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "hyperparameter_tuning_job", + "type": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17759,22 +17745,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_custom_job" + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "create_hyperparameter_tuning_job" }, - "description": "Sample for DeleteCustomJob", - 
"file": "aiplatform_v1_generated_job_service_delete_custom_job_sync.py", + "description": "Sample for CreateHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteCustomJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_CreateHyperparameterTuningJob_sync", "segments": [ { - "end": 55, + "end": 63, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 63, "start": 27, "type": "SHORT" }, @@ -17784,22 +17770,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 57, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 60, + "start": 58, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 64, + "start": 61, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_custom_job_sync.py" + "title": "aiplatform_v1_generated_job_service_create_hyperparameter_tuning_job_sync.py" }, { "canonical": true, @@ -17809,24 +17795,28 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteDataLabelingJob" + "shortName": "CreateModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest" }, { - "name": "name", + 
"name": "parent", "type": "str" }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17840,22 +17830,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_data_labeling_job" + "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "create_model_deployment_monitoring_job" }, - "description": "Sample for DeleteDataLabelingJob", - "file": "aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py", + "description": "Sample for CreateModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteDataLabelingJob_async", + "regionTag": "aiplatform_v1_generated_JobService_CreateModelDeploymentMonitoringJob_async", "segments": [ { - "end": 55, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 56, "start": 27, "type": "SHORT" }, @@ -17865,22 +17855,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py" + "title": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_async.py" }, { "canonical": true, @@ -17889,24 +17879,28 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_data_labeling_job", + "fullName": 
"google.cloud.aiplatform_v1.JobServiceClient.create_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CreateModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteDataLabelingJob" + "shortName": "CreateModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1.types.CreateModelDeploymentMonitoringJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17920,22 +17914,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_data_labeling_job" + "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "create_model_deployment_monitoring_job" }, - "description": "Sample for DeleteDataLabelingJob", - "file": "aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py", + "description": "Sample for CreateModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteDataLabelingJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_CreateModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 55, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 56, "start": 27, "type": "SHORT" }, @@ -17945,22 +17939,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - 
"end": 52, - "start": 46, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py" + "title": "aiplatform_v1_generated_job_service_create_model_deployment_monitoring_job_sync.py" }, { "canonical": true, @@ -17970,24 +17964,28 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.create_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CreateNasJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteHyperparameterTuningJob" + "shortName": "CreateNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1.types.CreateNasJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "nas_job", + "type": "google.cloud.aiplatform_v1.types.NasJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -18001,22 +17999,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1.types.NasJob", + "shortName": "create_nas_job" }, - "description": "Sample for DeleteHyperparameterTuningJob", - "file": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py", + "description": "Sample for CreateNasJob", + "file": "aiplatform_v1_generated_job_service_create_nas_job_async.py", "language": "PYTHON", "origin": 
"API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_async", + "regionTag": "aiplatform_v1_generated_JobService_CreateNasJob_async", "segments": [ { - "end": 55, + "end": 58, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 58, "start": 27, "type": "SHORT" }, @@ -18026,22 +18024,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 52, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 55, + "start": 53, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 59, + "start": 56, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py" + "title": "aiplatform_v1_generated_job_service_create_nas_job_async.py" }, { "canonical": true, @@ -18050,24 +18048,28 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.create_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1.JobService.CreateNasJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteHyperparameterTuningJob" + "shortName": "CreateNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1.types.CreateNasJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "nas_job", + "type": "google.cloud.aiplatform_v1.types.NasJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -18081,22 +18083,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": 
"delete_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1.types.NasJob", + "shortName": "create_nas_job" }, - "description": "Sample for DeleteHyperparameterTuningJob", - "file": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py", + "description": "Sample for CreateNasJob", + "file": "aiplatform_v1_generated_job_service_create_nas_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_CreateNasJob_sync", "segments": [ { - "end": 55, + "end": 58, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 58, "start": 27, "type": "SHORT" }, @@ -18106,22 +18108,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 52, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 55, + "start": 53, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 59, + "start": 56, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py" + "title": "aiplatform_v1_generated_job_service_create_nas_job_sync.py" }, { "canonical": true, @@ -18131,19 +18133,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteModelDeploymentMonitoringJob" + "shortName": "DeleteBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest" }, { "name": "name", @@ -18163,13 +18165,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_model_deployment_monitoring_job" + "shortName": "delete_batch_prediction_job" }, - "description": "Sample for DeleteModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py", + "description": "Sample for DeleteBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1_generated_JobService_DeleteBatchPredictionJob_async", "segments": [ { "end": 55, @@ -18202,7 +18204,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_async.py" }, { "canonical": true, @@ -18211,19 +18213,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteModelDeploymentMonitoringJob" + "shortName": "DeleteBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest" }, { "name": "name", @@ -18243,13 +18245,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_model_deployment_monitoring_job" + "shortName": "delete_batch_prediction_job" }, - "description": "Sample for DeleteModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py", + "description": "Sample for DeleteBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_DeleteBatchPredictionJob_sync", "segments": [ { "end": 55, @@ -18282,7 +18284,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1_generated_job_service_delete_batch_prediction_job_sync.py" }, { "canonical": true, @@ -18292,19 +18294,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_nas_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteNasJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteCustomJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteNasJob" + "shortName": "DeleteCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteNasJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteCustomJobRequest" }, { "name": "name", @@ -18324,13 
+18326,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_nas_job" + "shortName": "delete_custom_job" }, - "description": "Sample for DeleteNasJob", - "file": "aiplatform_v1_generated_job_service_delete_nas_job_async.py", + "description": "Sample for DeleteCustomJob", + "file": "aiplatform_v1_generated_job_service_delete_custom_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteNasJob_async", + "regionTag": "aiplatform_v1_generated_JobService_DeleteCustomJob_async", "segments": [ { "end": 55, @@ -18363,7 +18365,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_nas_job_async.py" + "title": "aiplatform_v1_generated_job_service_delete_custom_job_async.py" }, { "canonical": true, @@ -18372,19 +18374,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_nas_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.DeleteNasJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteCustomJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "DeleteNasJob" + "shortName": "DeleteCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteNasJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteCustomJobRequest" }, { "name": "name", @@ -18404,13 +18406,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_nas_job" + "shortName": "delete_custom_job" }, - "description": "Sample for DeleteNasJob", - "file": "aiplatform_v1_generated_job_service_delete_nas_job_sync.py", + "description": "Sample for DeleteCustomJob", + "file": 
"aiplatform_v1_generated_job_service_delete_custom_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_DeleteNasJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_DeleteCustomJob_sync", "segments": [ { "end": 55, @@ -18443,7 +18445,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_delete_nas_job_sync.py" + "title": "aiplatform_v1_generated_job_service_delete_custom_job_sync.py" }, { "canonical": true, @@ -18453,19 +18455,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetBatchPredictionJob" + "shortName": "DeleteDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest" }, { "name": "name", @@ -18484,22 +18486,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", - "shortName": "get_batch_prediction_job" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_data_labeling_job" }, - "description": "Sample for GetBatchPredictionJob", - "file": "aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py", + "description": "Sample for DeleteDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - 
"regionTag": "aiplatform_v1_generated_JobService_GetBatchPredictionJob_async", + "regionTag": "aiplatform_v1_generated_JobService_DeleteDataLabelingJob_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -18514,17 +18516,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py" + "title": "aiplatform_v1_generated_job_service_delete_data_labeling_job_async.py" }, { "canonical": true, @@ -18533,19 +18535,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetBatchPredictionJob" + "shortName": "DeleteDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest" }, { "name": "name", @@ -18564,22 +18566,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", - "shortName": "get_batch_prediction_job" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_data_labeling_job" }, - "description": "Sample for GetBatchPredictionJob", - "file": "aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py", + "description": "Sample for 
DeleteDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetBatchPredictionJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_DeleteDataLabelingJob_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -18594,17 +18596,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py" + "title": "aiplatform_v1_generated_job_service_delete_data_labeling_job_sync.py" }, { "canonical": true, @@ -18614,19 +18616,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_custom_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetCustomJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetCustomJob" + "shortName": "DeleteHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetCustomJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest" }, { "name": "name", @@ -18645,22 +18647,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.CustomJob", - "shortName": "get_custom_job" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_hyperparameter_tuning_job" }, - 
"description": "Sample for GetCustomJob", - "file": "aiplatform_v1_generated_job_service_get_custom_job_async.py", + "description": "Sample for DeleteHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetCustomJob_async", + "regionTag": "aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -18675,17 +18677,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_custom_job_async.py" + "title": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_async.py" }, { "canonical": true, @@ -18694,19 +18696,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_custom_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetCustomJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetCustomJob" + "shortName": "DeleteHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetCustomJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest" }, { "name": "name", @@ -18725,22 +18727,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.CustomJob", - 
"shortName": "get_custom_job" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_hyperparameter_tuning_job" }, - "description": "Sample for GetCustomJob", - "file": "aiplatform_v1_generated_job_service_get_custom_job_sync.py", + "description": "Sample for DeleteHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetCustomJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_DeleteHyperparameterTuningJob_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -18755,17 +18757,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_custom_job_sync.py" + "title": "aiplatform_v1_generated_job_service_delete_hyperparameter_tuning_job_sync.py" }, { "canonical": true, @@ -18775,19 +18777,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetDataLabelingJob" + "shortName": "DeleteModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest" + "type": 
"google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -18806,22 +18808,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", - "shortName": "get_data_labeling_job" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model_deployment_monitoring_job" }, - "description": "Sample for GetDataLabelingJob", - "file": "aiplatform_v1_generated_job_service_get_data_labeling_job_async.py", + "description": "Sample for DeleteModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetDataLabelingJob_async", + "regionTag": "aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -18836,17 +18838,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_data_labeling_job_async.py" + "title": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_async.py" }, { "canonical": true, @@ -18855,19 +18857,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteModelDeploymentMonitoringJob", "service": { "fullName": 
"google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetDataLabelingJob" + "shortName": "DeleteModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -18886,22 +18888,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", - "shortName": "get_data_labeling_job" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model_deployment_monitoring_job" }, - "description": "Sample for GetDataLabelingJob", - "file": "aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py", + "description": "Sample for DeleteModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetDataLabelingJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -18916,17 +18918,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py" + "title": "aiplatform_v1_generated_job_service_delete_model_deployment_monitoring_job_sync.py" }, { "canonical": true, @@ -18936,19 +18938,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_hyperparameter_tuning_job", + "fullName": 
"google.cloud.aiplatform_v1.JobServiceAsyncClient.delete_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteNasJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetHyperparameterTuningJob" + "shortName": "DeleteNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteNasJobRequest" }, { "name": "name", @@ -18967,22 +18969,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", - "shortName": "get_hyperparameter_tuning_job" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_nas_job" }, - "description": "Sample for GetHyperparameterTuningJob", - "file": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py", + "description": "Sample for DeleteNasJob", + "file": "aiplatform_v1_generated_job_service_delete_nas_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_async", + "regionTag": "aiplatform_v1_generated_JobService_DeleteNasJob_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -18997,17 +18999,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py" + "title": "aiplatform_v1_generated_job_service_delete_nas_job_async.py" }, { "canonical": true, @@ -19016,19 +19018,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", 
"shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.delete_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1.JobService.DeleteNasJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetHyperparameterTuningJob" + "shortName": "DeleteNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteNasJobRequest" }, { "name": "name", @@ -19047,22 +19049,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", - "shortName": "get_hyperparameter_tuning_job" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_nas_job" }, - "description": "Sample for GetHyperparameterTuningJob", - "file": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py", + "description": "Sample for DeleteNasJob", + "file": "aiplatform_v1_generated_job_service_delete_nas_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_DeleteNasJob_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -19077,17 +19079,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py" + "title": 
"aiplatform_v1_generated_job_service_delete_nas_job_sync.py" }, { "canonical": true, @@ -19097,19 +19099,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetModelDeploymentMonitoringJob" + "shortName": "GetBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest" }, { "name": "name", @@ -19128,14 +19130,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", - "shortName": "get_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" }, - "description": "Sample for GetModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py", + "description": "Sample for GetBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1_generated_JobService_GetBatchPredictionJob_async", "segments": [ { "end": 51, @@ -19168,7 +19170,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py" + 
"title": "aiplatform_v1_generated_job_service_get_batch_prediction_job_async.py" }, { "canonical": true, @@ -19177,19 +19179,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetModelDeploymentMonitoringJob" + "shortName": "GetBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest" }, { "name": "name", @@ -19208,14 +19210,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", - "shortName": "get_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" }, - "description": "Sample for GetModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py", + "description": "Sample for GetBatchPredictionJob", + "file": "aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetBatchPredictionJob_sync", "segments": [ { "end": 51, @@ -19248,7 +19250,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py" + 
"title": "aiplatform_v1_generated_job_service_get_batch_prediction_job_sync.py" }, { "canonical": true, @@ -19258,19 +19260,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_nas_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetNasJob", + "fullName": "google.cloud.aiplatform.v1.JobService.GetCustomJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetNasJob" + "shortName": "GetCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetNasJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetCustomJobRequest" }, { "name": "name", @@ -19289,14 +19291,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.NasJob", - "shortName": "get_nas_job" + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "get_custom_job" }, - "description": "Sample for GetNasJob", - "file": "aiplatform_v1_generated_job_service_get_nas_job_async.py", + "description": "Sample for GetCustomJob", + "file": "aiplatform_v1_generated_job_service_get_custom_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetNasJob_async", + "regionTag": "aiplatform_v1_generated_JobService_GetCustomJob_async", "segments": [ { "end": 51, @@ -19329,7 +19331,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_nas_job_async.py" + "title": "aiplatform_v1_generated_job_service_get_custom_job_async.py" }, { "canonical": true, @@ -19338,19 +19340,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_nas_job", + 
"fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetNasJob", + "fullName": "google.cloud.aiplatform.v1.JobService.GetCustomJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetNasJob" + "shortName": "GetCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetNasJobRequest" + "type": "google.cloud.aiplatform_v1.types.GetCustomJobRequest" }, { "name": "name", @@ -19369,14 +19371,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.NasJob", - "shortName": "get_nas_job" + "resultType": "google.cloud.aiplatform_v1.types.CustomJob", + "shortName": "get_custom_job" }, - "description": "Sample for GetNasJob", - "file": "aiplatform_v1_generated_job_service_get_nas_job_sync.py", + "description": "Sample for GetCustomJob", + "file": "aiplatform_v1_generated_job_service_get_custom_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetNasJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetCustomJob_sync", "segments": [ { "end": 51, @@ -19409,7 +19411,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_nas_job_sync.py" + "title": "aiplatform_v1_generated_job_service_get_custom_job_sync.py" }, { "canonical": true, @@ -19419,19 +19421,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_nas_trial_detail", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetNasTrialDetail", + "fullName": "google.cloud.aiplatform.v1.JobService.GetDataLabelingJob", "service": { "fullName": 
"google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetNasTrialDetail" + "shortName": "GetDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetNasTrialDetailRequest" + "type": "google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest" }, { "name": "name", @@ -19450,14 +19452,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.NasTrialDetail", - "shortName": "get_nas_trial_detail" + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" }, - "description": "Sample for GetNasTrialDetail", - "file": "aiplatform_v1_generated_job_service_get_nas_trial_detail_async.py", + "description": "Sample for GetDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_get_data_labeling_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetNasTrialDetail_async", + "regionTag": "aiplatform_v1_generated_JobService_GetDataLabelingJob_async", "segments": [ { "end": 51, @@ -19490,7 +19492,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_nas_trial_detail_async.py" + "title": "aiplatform_v1_generated_job_service_get_data_labeling_job_async.py" }, { "canonical": true, @@ -19499,19 +19501,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_nas_trial_detail", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.GetNasTrialDetail", + "fullName": "google.cloud.aiplatform.v1.JobService.GetDataLabelingJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "GetNasTrialDetail" + "shortName": "GetDataLabelingJob" }, "parameters": [ { "name": 
"request", - "type": "google.cloud.aiplatform_v1.types.GetNasTrialDetailRequest" + "type": "google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest" }, { "name": "name", @@ -19530,14 +19532,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.NasTrialDetail", - "shortName": "get_nas_trial_detail" + "resultType": "google.cloud.aiplatform_v1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" }, - "description": "Sample for GetNasTrialDetail", - "file": "aiplatform_v1_generated_job_service_get_nas_trial_detail_sync.py", + "description": "Sample for GetDataLabelingJob", + "file": "aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_GetNasTrialDetail_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetDataLabelingJob_sync", "segments": [ { "end": 51, @@ -19570,7 +19572,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_get_nas_trial_detail_sync.py" + "title": "aiplatform_v1_generated_job_service_get_data_labeling_job_sync.py" }, { "canonical": true, @@ -19580,22 +19582,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_batch_prediction_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListBatchPredictionJobs" + "shortName": "GetHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest" + "type": 
"google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -19611,22 +19613,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager", - "shortName": "list_batch_prediction_jobs" + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" }, - "description": "Sample for ListBatchPredictionJobs", - "file": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py", + "description": "Sample for GetHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListBatchPredictionJobs_async", + "regionTag": "aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -19646,12 +19648,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py" + "title": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_async.py" }, { "canonical": true, @@ -19660,22 +19662,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_batch_prediction_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": 
"JobService" }, - "shortName": "ListBatchPredictionJobs" + "shortName": "GetHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest" + "type": "google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -19691,22 +19693,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager", - "shortName": "list_batch_prediction_jobs" + "resultType": "google.cloud.aiplatform_v1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" }, - "description": "Sample for ListBatchPredictionJobs", - "file": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py", + "description": "Sample for GetHyperparameterTuningJob", + "file": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListBatchPredictionJobs_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetHyperparameterTuningJob_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -19726,12 +19728,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py" + "title": "aiplatform_v1_generated_job_service_get_hyperparameter_tuning_job_sync.py" }, { "canonical": true, @@ -19741,22 +19743,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_custom_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_model_deployment_monitoring_job", "method": { - "fullName": 
"google.cloud.aiplatform.v1.JobService.ListCustomJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListCustomJobs" + "shortName": "GetModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListCustomJobsRequest" + "type": "google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -19772,22 +19774,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsAsyncPager", - "shortName": "list_custom_jobs" + "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" }, - "description": "Sample for ListCustomJobs", - "file": "aiplatform_v1_generated_job_service_list_custom_jobs_async.py", + "description": "Sample for GetModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListCustomJobs_async", + "regionTag": "aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -19807,12 +19809,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_custom_jobs_async.py" + "title": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_async.py" }, { "canonical": true, @@ -19821,22 +19823,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1.JobServiceClient.list_custom_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListCustomJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.GetModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListCustomJobs" + "shortName": "GetModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListCustomJobsRequest" + "type": "google.cloud.aiplatform_v1.types.GetModelDeploymentMonitoringJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -19852,22 +19854,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager", - "shortName": "list_custom_jobs" + "resultType": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" }, - "description": "Sample for ListCustomJobs", - "file": "aiplatform_v1_generated_job_service_list_custom_jobs_sync.py", + "description": "Sample for GetModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListCustomJobs_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -19887,12 +19889,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_custom_jobs_sync.py" + "title": 
"aiplatform_v1_generated_job_service_get_model_deployment_monitoring_job_sync.py" }, { "canonical": true, @@ -19902,22 +19904,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_data_labeling_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.GetNasJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListDataLabelingJobs" + "shortName": "GetNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest" + "type": "google.cloud.aiplatform_v1.types.GetNasJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -19933,22 +19935,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsAsyncPager", - "shortName": "list_data_labeling_jobs" + "resultType": "google.cloud.aiplatform_v1.types.NasJob", + "shortName": "get_nas_job" }, - "description": "Sample for ListDataLabelingJobs", - "file": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py", + "description": "Sample for GetNasJob", + "file": "aiplatform_v1_generated_job_service_get_nas_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListDataLabelingJobs_async", + "regionTag": "aiplatform_v1_generated_JobService_GetNasJob_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -19968,12 +19970,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py" + "title": "aiplatform_v1_generated_job_service_get_nas_job_async.py" }, { "canonical": true, @@ -19982,22 +19984,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_data_labeling_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.GetNasJob", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListDataLabelingJobs" + "shortName": "GetNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest" + "type": "google.cloud.aiplatform_v1.types.GetNasJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20013,22 +20015,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager", - "shortName": "list_data_labeling_jobs" + "resultType": "google.cloud.aiplatform_v1.types.NasJob", + "shortName": "get_nas_job" }, - "description": "Sample for ListDataLabelingJobs", - "file": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py", + "description": "Sample for GetNasJob", + "file": "aiplatform_v1_generated_job_service_get_nas_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListDataLabelingJobs_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetNasJob_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -20048,12 +20050,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - 
"title": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py" + "title": "aiplatform_v1_generated_job_service_get_nas_job_sync.py" }, { "canonical": true, @@ -20063,22 +20065,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_hyperparameter_tuning_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.get_nas_trial_detail", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.GetNasTrialDetail", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListHyperparameterTuningJobs" + "shortName": "GetNasTrialDetail" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest" + "type": "google.cloud.aiplatform_v1.types.GetNasTrialDetailRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20094,22 +20096,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager", - "shortName": "list_hyperparameter_tuning_jobs" + "resultType": "google.cloud.aiplatform_v1.types.NasTrialDetail", + "shortName": "get_nas_trial_detail" }, - "description": "Sample for ListHyperparameterTuningJobs", - "file": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py", + "description": "Sample for GetNasTrialDetail", + "file": "aiplatform_v1_generated_job_service_get_nas_trial_detail_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_async", + "regionTag": "aiplatform_v1_generated_JobService_GetNasTrialDetail_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 
52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -20129,12 +20131,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py" + "title": "aiplatform_v1_generated_job_service_get_nas_trial_detail_async.py" }, { "canonical": true, @@ -20143,22 +20145,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_hyperparameter_tuning_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.get_nas_trial_detail", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.GetNasTrialDetail", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListHyperparameterTuningJobs" + "shortName": "GetNasTrialDetail" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest" + "type": "google.cloud.aiplatform_v1.types.GetNasTrialDetailRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20174,22 +20176,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager", - "shortName": "list_hyperparameter_tuning_jobs" + "resultType": "google.cloud.aiplatform_v1.types.NasTrialDetail", + "shortName": "get_nas_trial_detail" }, - "description": "Sample for ListHyperparameterTuningJobs", - "file": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py", + "description": "Sample for GetNasTrialDetail", + "file": "aiplatform_v1_generated_job_service_get_nas_trial_detail_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_sync", + "regionTag": "aiplatform_v1_generated_JobService_GetNasTrialDetail_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -20209,12 +20211,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py" + "title": "aiplatform_v1_generated_job_service_get_nas_trial_detail_sync.py" }, { "canonical": true, @@ -20224,19 +20226,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_model_deployment_monitoring_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_batch_prediction_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListModelDeploymentMonitoringJobs" + "shortName": "ListBatchPredictionJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest" }, { "name": "parent", @@ -20255,14 +20257,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager", - "shortName": "list_model_deployment_monitoring_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager", + "shortName": "list_batch_prediction_jobs" }, - "description": "Sample for ListModelDeploymentMonitoringJobs", - "file": 
"aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py", + "description": "Sample for ListBatchPredictionJobs", + "file": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_async", + "regionTag": "aiplatform_v1_generated_JobService_ListBatchPredictionJobs_async", "segments": [ { "end": 52, @@ -20295,7 +20297,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py" + "title": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_async.py" }, { "canonical": true, @@ -20304,19 +20306,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_model_deployment_monitoring_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_batch_prediction_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListModelDeploymentMonitoringJobs" + "shortName": "ListBatchPredictionJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest" }, { "name": "parent", @@ -20335,14 +20337,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager", - "shortName": "list_model_deployment_monitoring_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager", + "shortName": 
"list_batch_prediction_jobs" }, - "description": "Sample for ListModelDeploymentMonitoringJobs", - "file": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py", + "description": "Sample for ListBatchPredictionJobs", + "file": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListBatchPredictionJobs_sync", "segments": [ { "end": 52, @@ -20375,7 +20377,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py" + "title": "aiplatform_v1_generated_job_service_list_batch_prediction_jobs_sync.py" }, { "canonical": true, @@ -20385,19 +20387,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_nas_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_custom_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListNasJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListCustomJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListNasJobs" + "shortName": "ListCustomJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListNasJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListCustomJobsRequest" }, { "name": "parent", @@ -20416,14 +20418,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasJobsAsyncPager", - "shortName": "list_nas_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsAsyncPager", + "shortName": "list_custom_jobs" }, - "description": "Sample for 
ListNasJobs", - "file": "aiplatform_v1_generated_job_service_list_nas_jobs_async.py", + "description": "Sample for ListCustomJobs", + "file": "aiplatform_v1_generated_job_service_list_custom_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListNasJobs_async", + "regionTag": "aiplatform_v1_generated_JobService_ListCustomJobs_async", "segments": [ { "end": 52, @@ -20456,7 +20458,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_nas_jobs_async.py" + "title": "aiplatform_v1_generated_job_service_list_custom_jobs_async.py" }, { "canonical": true, @@ -20465,19 +20467,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_nas_jobs", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_custom_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListNasJobs", + "fullName": "google.cloud.aiplatform.v1.JobService.ListCustomJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListNasJobs" + "shortName": "ListCustomJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListNasJobsRequest" + "type": "google.cloud.aiplatform_v1.types.ListCustomJobsRequest" }, { "name": "parent", @@ -20496,14 +20498,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasJobsPager", - "shortName": "list_nas_jobs" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager", + "shortName": "list_custom_jobs" }, - "description": "Sample for ListNasJobs", - "file": "aiplatform_v1_generated_job_service_list_nas_jobs_sync.py", + "description": "Sample for ListCustomJobs", + "file": "aiplatform_v1_generated_job_service_list_custom_jobs_sync.py", "language": 
"PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListNasJobs_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListCustomJobs_sync", "segments": [ { "end": 52, @@ -20536,7 +20538,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_nas_jobs_sync.py" + "title": "aiplatform_v1_generated_job_service_list_custom_jobs_sync.py" }, { "canonical": true, @@ -20546,19 +20548,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_nas_trial_details", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_data_labeling_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListNasTrialDetails", + "fullName": "google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListNasTrialDetails" + "shortName": "ListDataLabelingJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListNasTrialDetailsRequest" + "type": "google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest" }, { "name": "parent", @@ -20577,14 +20579,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasTrialDetailsAsyncPager", - "shortName": "list_nas_trial_details" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsAsyncPager", + "shortName": "list_data_labeling_jobs" }, - "description": "Sample for ListNasTrialDetails", - "file": "aiplatform_v1_generated_job_service_list_nas_trial_details_async.py", + "description": "Sample for ListDataLabelingJobs", + "file": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_JobService_ListNasTrialDetails_async", + "regionTag": "aiplatform_v1_generated_JobService_ListDataLabelingJobs_async", "segments": [ { "end": 52, @@ -20617,7 +20619,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_nas_trial_details_async.py" + "title": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_async.py" }, { "canonical": true, @@ -20626,19 +20628,19 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_nas_trial_details", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_data_labeling_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ListNasTrialDetails", + "fullName": "google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ListNasTrialDetails" + "shortName": "ListDataLabelingJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListNasTrialDetailsRequest" + "type": "google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest" }, { "name": "parent", @@ -20657,14 +20659,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasTrialDetailsPager", - "shortName": "list_nas_trial_details" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager", + "shortName": "list_data_labeling_jobs" }, - "description": "Sample for ListNasTrialDetails", - "file": "aiplatform_v1_generated_job_service_list_nas_trial_details_sync.py", + "description": "Sample for ListDataLabelingJobs", + "file": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ListNasTrialDetails_sync", + 
"regionTag": "aiplatform_v1_generated_JobService_ListDataLabelingJobs_sync", "segments": [ { "end": 52, @@ -20697,7 +20699,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_list_nas_trial_details_sync.py" + "title": "aiplatform_v1_generated_job_service_list_data_labeling_jobs_sync.py" }, { "canonical": true, @@ -20707,22 +20709,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.pause_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_hyperparameter_tuning_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "PauseModelDeploymentMonitoringJob" + "shortName": "ListHyperparameterTuningJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -20738,21 +20740,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "pause_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager", + "shortName": "list_hyperparameter_tuning_jobs" }, - "description": "Sample for PauseModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py", + "description": "Sample for ListHyperparameterTuningJobs", + "file": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_async", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -20767,15 +20770,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_async.py" }, { "canonical": true, @@ -20784,22 +20789,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.pause_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_hyperparameter_tuning_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "PauseModelDeploymentMonitoringJob" + "shortName": "ListHyperparameterTuningJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -20815,21 +20820,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "pause_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager", + "shortName": "list_hyperparameter_tuning_jobs" }, - "description": "Sample for 
PauseModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py", + "description": "Sample for ListHyperparameterTuningJobs", + "file": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListHyperparameterTuningJobs_sync", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -20844,15 +20850,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py" }, { "canonical": true, @@ -20862,22 +20870,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.resume_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_model_deployment_monitoring_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ResumeModelDeploymentMonitoringJob" + "shortName": "ListModelDeploymentMonitoringJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest" }, { - 
"name": "name", + "name": "parent", "type": "str" }, { @@ -20893,21 +20901,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "resume_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager", + "shortName": "list_model_deployment_monitoring_jobs" }, - "description": "Sample for ResumeModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py", + "description": "Sample for ListModelDeploymentMonitoringJobs", + "file": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_async", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -20922,15 +20931,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_async.py" }, { "canonical": true, @@ -20939,22 +20950,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.resume_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_model_deployment_monitoring_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.ListModelDeploymentMonitoringJobs", "service": { "fullName": 
"google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "ResumeModelDeploymentMonitoringJob" + "shortName": "ListModelDeploymentMonitoringJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1.types.ListModelDeploymentMonitoringJobsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -20970,21 +20981,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "resume_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager", + "shortName": "list_model_deployment_monitoring_jobs" }, - "description": "Sample for ResumeModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py", + "description": "Sample for ListModelDeploymentMonitoringJobs", + "file": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListModelDeploymentMonitoringJobs_sync", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -20999,15 +21011,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py" }, { "canonical": true, @@ -21017,26 +21031,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1.JobServiceAsyncClient.search_model_deployment_monitoring_stats_anomalies", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_nas_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", + "fullName": "google.cloud.aiplatform.v1.JobService.ListNasJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" + "shortName": "ListNasJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" - }, - { - "name": "model_deployment_monitoring_job", - "type": "str" + "type": "google.cloud.aiplatform_v1.types.ListNasJobsRequest" }, { - "name": "deployed_model_id", + "name": "parent", "type": "str" }, { @@ -21052,22 +21062,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager", - "shortName": "search_model_deployment_monitoring_stats_anomalies" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasJobsAsyncPager", + "shortName": "list_nas_jobs" }, - "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", - "file": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py", + "description": "Sample for ListNasJobs", + "file": "aiplatform_v1_generated_job_service_list_nas_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async", + "regionTag": "aiplatform_v1_generated_JobService_ListNasJobs_async", "segments": [ { - "end": 53, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 53, + "end": 52, "start": 27, "type": "SHORT" }, @@ -21077,22 +21087,22 @@ "type": 
"CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 54, - "start": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py" + "title": "aiplatform_v1_generated_job_service_list_nas_jobs_async.py" }, { "canonical": true, @@ -21101,26 +21111,22 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceClient.search_model_deployment_monitoring_stats_anomalies", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_nas_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", + "fullName": "google.cloud.aiplatform.v1.JobService.ListNasJobs", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" + "shortName": "ListNasJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" - }, - { - "name": "model_deployment_monitoring_job", - "type": "str" + "type": "google.cloud.aiplatform_v1.types.ListNasJobsRequest" }, { - "name": "deployed_model_id", + "name": "parent", "type": "str" }, { @@ -21136,22 +21142,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager", - "shortName": "search_model_deployment_monitoring_stats_anomalies" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasJobsPager", + "shortName": "list_nas_jobs" }, - "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", - "file": 
"aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py", + "description": "Sample for ListNasJobs", + "file": "aiplatform_v1_generated_job_service_list_nas_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListNasJobs_sync", "segments": [ { - "end": 53, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 53, + "end": 52, "start": 27, "type": "SHORT" }, @@ -21161,22 +21167,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 54, - "start": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py" + "title": "aiplatform_v1_generated_job_service_list_nas_jobs_sync.py" }, { "canonical": true, @@ -21186,27 +21192,23 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.update_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.list_nas_trial_details", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.ListNasTrialDetails", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "UpdateModelDeploymentMonitoringJob" + "shortName": "ListNasTrialDetails" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest" - }, - { - "name": "model_deployment_monitoring_job", - "type": 
"google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + "type": "google.cloud.aiplatform_v1.types.ListNasTrialDetailsRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "parent", + "type": "str" }, { "name": "retry", @@ -21221,22 +21223,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasTrialDetailsAsyncPager", + "shortName": "list_nas_trial_details" }, - "description": "Sample for UpdateModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py", + "description": "Sample for ListNasTrialDetails", + "file": "aiplatform_v1_generated_job_service_list_nas_trial_details_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1_generated_JobService_ListNasTrialDetails_async", "segments": [ { - "end": 59, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 52, "start": 27, "type": "SHORT" }, @@ -21246,22 +21248,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1_generated_job_service_list_nas_trial_details_async.py" }, { "canonical": true, @@ -21270,27 +21272,23 @@ "fullName": "google.cloud.aiplatform_v1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1.JobServiceClient.update_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.list_nas_trial_details", "method": { - "fullName": "google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1.JobService.ListNasTrialDetails", "service": { "fullName": "google.cloud.aiplatform.v1.JobService", "shortName": "JobService" }, - "shortName": "UpdateModelDeploymentMonitoringJob" + "shortName": "ListNasTrialDetails" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest" - }, - { - "name": "model_deployment_monitoring_job", - "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + "type": "google.cloud.aiplatform_v1.types.ListNasTrialDetailsRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "parent", + "type": "str" }, { "name": "retry", @@ -21305,22 +21303,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.ListNasTrialDetailsPager", + "shortName": "list_nas_trial_details" }, - "description": "Sample for UpdateModelDeploymentMonitoringJob", - "file": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py", + "description": "Sample for ListNasTrialDetails", + "file": "aiplatform_v1_generated_job_service_list_nas_trial_details_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1_generated_JobService_ListNasTrialDetails_sync", "segments": [ { - "end": 59, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 52, "start": 27, "type": "SHORT" }, @@ -21330,53 +21328,49 @@ "type": 
"CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1_generated_job_service_list_nas_trial_details_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient", - "shortName": "LlmUtilityServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient.compute_tokens", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.pause_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.ComputeTokens", + "fullName": "google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob", "service": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", - "shortName": "LlmUtilityService" + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" }, - "shortName": "ComputeTokens" + "shortName": "PauseModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ComputeTokensRequest" + "type": "google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest" }, { - "name": "endpoint", + "name": "name", "type": "str" }, - { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -21390,22 +21384,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ComputeTokensResponse", - "shortName": "compute_tokens" + "shortName": 
"pause_model_deployment_monitoring_job" }, - "description": "Sample for ComputeTokens", - "file": "aiplatform_v1_generated_llm_utility_service_compute_tokens_async.py", + "description": "Sample for PauseModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_LlmUtilityService_ComputeTokens_async", + "regionTag": "aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_async", "segments": [ { - "end": 55, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 49, "start": 27, "type": "SHORT" }, @@ -21415,52 +21408,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_llm_utility_service_compute_tokens_async.py" + "title": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient", - "shortName": "LlmUtilityServiceClient" + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient.compute_tokens", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.pause_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.ComputeTokens", + "fullName": "google.cloud.aiplatform.v1.JobService.PauseModelDeploymentMonitoringJob", "service": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", - "shortName": "LlmUtilityService" + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" }, - "shortName": "ComputeTokens" 
+ "shortName": "PauseModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ComputeTokensRequest" + "type": "google.cloud.aiplatform_v1.types.PauseModelDeploymentMonitoringJobRequest" }, { - "name": "endpoint", + "name": "name", "type": "str" }, - { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -21474,22 +21461,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ComputeTokensResponse", - "shortName": "compute_tokens" + "shortName": "pause_model_deployment_monitoring_job" }, - "description": "Sample for ComputeTokens", - "file": "aiplatform_v1_generated_llm_utility_service_compute_tokens_sync.py", + "description": "Sample for PauseModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_LlmUtilityService_ComputeTokens_sync", + "regionTag": "aiplatform_v1_generated_JobService_PauseModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 55, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 49, "start": 27, "type": "SHORT" }, @@ -21499,53 +21485,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_llm_utility_service_compute_tokens_sync.py" + "title": "aiplatform_v1_generated_job_service_pause_model_deployment_monitoring_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient", - "shortName": "LlmUtilityServiceAsyncClient" + "fullName": 
"google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient.count_tokens", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.resume_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.CountTokens", + "fullName": "google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob", "service": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", - "shortName": "LlmUtilityService" + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" }, - "shortName": "CountTokens" + "shortName": "ResumeModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CountTokensRequest" + "type": "google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest" }, { - "name": "endpoint", + "name": "name", "type": "str" }, - { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -21559,22 +21539,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.CountTokensResponse", - "shortName": "count_tokens" + "shortName": "resume_model_deployment_monitoring_job" }, - "description": "Sample for CountTokens", - "file": "aiplatform_v1_generated_llm_utility_service_count_tokens_async.py", + "description": "Sample for ResumeModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_LlmUtilityService_CountTokens_async", + "regionTag": "aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_async", "segments": [ { - "end": 60, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 49, "start": 27, "type": 
"SHORT" }, @@ -21584,52 +21563,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 55, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_llm_utility_service_count_tokens_async.py" + "title": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient", - "shortName": "LlmUtilityServiceClient" + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient.count_tokens", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.resume_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.CountTokens", + "fullName": "google.cloud.aiplatform.v1.JobService.ResumeModelDeploymentMonitoringJob", "service": { - "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", - "shortName": "LlmUtilityService" + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" }, - "shortName": "CountTokens" + "shortName": "ResumeModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CountTokensRequest" + "type": "google.cloud.aiplatform_v1.types.ResumeModelDeploymentMonitoringJobRequest" }, { - "name": "endpoint", + "name": "name", "type": "str" }, - { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -21643,22 +21616,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.CountTokensResponse", - "shortName": "count_tokens" + "shortName": "resume_model_deployment_monitoring_job" 
}, - "description": "Sample for CountTokens", - "file": "aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py", + "description": "Sample for ResumeModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_LlmUtilityService_CountTokens_sync", + "regionTag": "aiplatform_v1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 60, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 49, "start": 27, "type": "SHORT" }, @@ -21668,44 +21640,50 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 55, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py" + "title": "aiplatform_v1_generated_job_service_resume_model_deployment_monitoring_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.MatchServiceAsyncClient", - "shortName": "MatchServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MatchServiceAsyncClient.find_neighbors", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.search_model_deployment_monitoring_stats_anomalies", "method": { - "fullName": "google.cloud.aiplatform.v1.MatchService.FindNeighbors", + "fullName": "google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", "service": { - "fullName": "google.cloud.aiplatform.v1.MatchService", - "shortName": "MatchService" + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" }, - "shortName": "FindNeighbors" + 
"shortName": "SearchModelDeploymentMonitoringStatsAnomalies" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.FindNeighborsRequest" + "type": "google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" }, { "name": "retry", @@ -21720,22 +21698,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.FindNeighborsResponse", - "shortName": "find_neighbors" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" }, - "description": "Sample for FindNeighbors", - "file": "aiplatform_v1_generated_match_service_find_neighbors_async.py", + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", + "file": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MatchService_FindNeighbors_async", + "regionTag": "aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async", "segments": [ { - "end": 51, + "end": 53, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 53, "start": 27, "type": "SHORT" }, @@ -21745,43 +21723,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 54, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_match_service_find_neighbors_async.py" + "title": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py" }, { "canonical": true, "clientMethod": { 
"client": { - "fullName": "google.cloud.aiplatform_v1.MatchServiceClient", - "shortName": "MatchServiceClient" + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MatchServiceClient.find_neighbors", + "fullName": "google.cloud.aiplatform_v1.JobServiceClient.search_model_deployment_monitoring_stats_anomalies", "method": { - "fullName": "google.cloud.aiplatform.v1.MatchService.FindNeighbors", + "fullName": "google.cloud.aiplatform.v1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", "service": { - "fullName": "google.cloud.aiplatform.v1.MatchService", - "shortName": "MatchService" + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" }, - "shortName": "FindNeighbors" + "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.FindNeighborsRequest" + "type": "google.cloud.aiplatform_v1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "str" + }, + { + "name": "deployed_model_id", + "type": "str" }, { "name": "retry", @@ -21796,22 +21782,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.FindNeighborsResponse", - "shortName": "find_neighbors" + "resultType": "google.cloud.aiplatform_v1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" }, - "description": "Sample for FindNeighbors", - "file": "aiplatform_v1_generated_match_service_find_neighbors_sync.py", + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", + "file": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_MatchService_FindNeighbors_sync", + "regionTag": "aiplatform_v1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync", "segments": [ { - "end": 51, + "end": 53, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 53, "start": 27, "type": "SHORT" }, @@ -21821,44 +21807,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 54, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_match_service_find_neighbors_sync.py" + "title": "aiplatform_v1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.MatchServiceAsyncClient", - "shortName": "MatchServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MatchServiceAsyncClient.read_index_datapoints", + "fullName": "google.cloud.aiplatform_v1.JobServiceAsyncClient.update_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.MatchService.ReadIndexDatapoints", + "fullName": "google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob", "service": { - "fullName": "google.cloud.aiplatform.v1.MatchService", - "shortName": "MatchService" + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" }, - "shortName": "ReadIndexDatapoints" + "shortName": "UpdateModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ReadIndexDatapointsRequest" + "type": "google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": 
"google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -21873,22 +21867,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ReadIndexDatapointsResponse", - "shortName": "read_index_datapoints" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_model_deployment_monitoring_job" }, - "description": "Sample for ReadIndexDatapoints", - "file": "aiplatform_v1_generated_match_service_read_index_datapoints_async.py", + "description": "Sample for UpdateModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MatchService_ReadIndexDatapoints_async", + "regionTag": "aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_async", "segments": [ { - "end": 51, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 59, "start": 27, "type": "SHORT" }, @@ -21898,43 +21892,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_match_service_read_index_datapoints_async.py" + "title": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.MatchServiceClient", - "shortName": "MatchServiceClient" + "fullName": "google.cloud.aiplatform_v1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MatchServiceClient.read_index_datapoints", + "fullName": 
"google.cloud.aiplatform_v1.JobServiceClient.update_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1.MatchService.ReadIndexDatapoints", + "fullName": "google.cloud.aiplatform.v1.JobService.UpdateModelDeploymentMonitoringJob", "service": { - "fullName": "google.cloud.aiplatform.v1.MatchService", - "shortName": "MatchService" + "fullName": "google.cloud.aiplatform.v1.JobService", + "shortName": "JobService" }, - "shortName": "ReadIndexDatapoints" + "shortName": "UpdateModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ReadIndexDatapointsRequest" + "type": "google.cloud.aiplatform_v1.types.UpdateModelDeploymentMonitoringJobRequest" + }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -21949,22 +21951,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ReadIndexDatapointsResponse", - "shortName": "read_index_datapoints" + "resultType": "google.api_core.operation.Operation", + "shortName": "update_model_deployment_monitoring_job" }, - "description": "Sample for ReadIndexDatapoints", - "file": "aiplatform_v1_generated_match_service_read_index_datapoints_sync.py", + "description": "Sample for UpdateModelDeploymentMonitoringJob", + "file": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MatchService_ReadIndexDatapoints_sync", + "regionTag": "aiplatform_v1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 51, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 59, "start": 27, "type": "SHORT" }, @@ -21974,56 +21976,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 
45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_match_service_read_index_datapoints_sync.py" + "title": "aiplatform_v1_generated_job_service_update_model_deployment_monitoring_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient", + "shortName": "LlmUtilityServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.add_context_artifacts_and_executions", + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient.compute_tokens", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions", + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.ComputeTokens", "service": { - "fullName": "google.cloud.aiplatform.v1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", + "shortName": "LlmUtilityService" }, - "shortName": "AddContextArtifactsAndExecutions" + "shortName": "ComputeTokens" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest" + "type": "google.cloud.aiplatform_v1.types.ComputeTokensRequest" }, { - "name": "context", + "name": "endpoint", "type": "str" }, { - "name": "artifacts", - "type": "MutableSequence[str]" - }, - { - "name": "executions", - "type": "MutableSequence[str]" + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" }, { "name": "retry", @@ -22038,22 +22036,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse", - "shortName": "add_context_artifacts_and_executions" + "resultType": "google.cloud.aiplatform_v1.types.ComputeTokensResponse", + "shortName": "compute_tokens" }, - "description": "Sample for AddContextArtifactsAndExecutions", - "file": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_async.py", + "description": "Sample for ComputeTokens", + "file": "aiplatform_v1_generated_llm_utility_service_compute_tokens_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_AddContextArtifactsAndExecutions_async", + "regionTag": "aiplatform_v1_generated_LlmUtilityService_ComputeTokens_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -22063,55 +22061,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 52, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_async.py" + "title": "aiplatform_v1_generated_llm_utility_service_compute_tokens_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient", + "shortName": "LlmUtilityServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.add_context_artifacts_and_executions", + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient.compute_tokens", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions", + "fullName": 
"google.cloud.aiplatform.v1.LlmUtilityService.ComputeTokens", "service": { - "fullName": "google.cloud.aiplatform.v1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", + "shortName": "LlmUtilityService" }, - "shortName": "AddContextArtifactsAndExecutions" + "shortName": "ComputeTokens" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest" + "type": "google.cloud.aiplatform_v1.types.ComputeTokensRequest" }, { - "name": "context", + "name": "endpoint", "type": "str" }, { - "name": "artifacts", - "type": "MutableSequence[str]" - }, - { - "name": "executions", - "type": "MutableSequence[str]" + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" }, { "name": "retry", @@ -22126,22 +22120,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse", - "shortName": "add_context_artifacts_and_executions" + "resultType": "google.cloud.aiplatform_v1.types.ComputeTokensResponse", + "shortName": "compute_tokens" }, - "description": "Sample for AddContextArtifactsAndExecutions", - "file": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_sync.py", + "description": "Sample for ComputeTokens", + "file": "aiplatform_v1_generated_llm_utility_service_compute_tokens_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_AddContextArtifactsAndExecutions_sync", + "regionTag": "aiplatform_v1_generated_LlmUtilityService_ComputeTokens_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -22151,52 +22145,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 52, + "start": 50, 
"type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_sync.py" + "title": "aiplatform_v1_generated_llm_utility_service_compute_tokens_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient", + "shortName": "LlmUtilityServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.add_context_children", + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceAsyncClient.count_tokens", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextChildren", + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.CountTokens", "service": { - "fullName": "google.cloud.aiplatform.v1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", + "shortName": "LlmUtilityService" }, - "shortName": "AddContextChildren" + "shortName": "CountTokens" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.AddContextChildrenRequest" + "type": "google.cloud.aiplatform_v1.types.CountTokensRequest" }, { - "name": "context", + "name": "endpoint", "type": "str" }, { - "name": "child_contexts", - "type": "MutableSequence[str]" + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" }, { "name": "retry", @@ -22211,22 +22205,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.AddContextChildrenResponse", - "shortName": "add_context_children" + "resultType": "google.cloud.aiplatform_v1.types.CountTokensResponse", + "shortName": "count_tokens" }, - "description": "Sample for AddContextChildren", - "file": 
"aiplatform_v1_generated_metadata_service_add_context_children_async.py", + "description": "Sample for CountTokens", + "file": "aiplatform_v1_generated_llm_utility_service_count_tokens_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_AddContextChildren_async", + "regionTag": "aiplatform_v1_generated_LlmUtilityService_CountTokens_async", "segments": [ { - "end": 51, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 60, "start": 27, "type": "SHORT" }, @@ -22236,51 +22230,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 57, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_add_context_children_async.py" + "title": "aiplatform_v1_generated_llm_utility_service_count_tokens_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient", + "shortName": "LlmUtilityServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.add_context_children", + "fullName": "google.cloud.aiplatform_v1.LlmUtilityServiceClient.count_tokens", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextChildren", + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService.CountTokens", "service": { - "fullName": "google.cloud.aiplatform.v1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1.LlmUtilityService", + "shortName": "LlmUtilityService" }, - "shortName": "AddContextChildren" + "shortName": "CountTokens" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.AddContextChildrenRequest" + "type": "google.cloud.aiplatform_v1.types.CountTokensRequest" }, { - "name": "context", + "name": "endpoint", "type": "str" }, { - "name": "child_contexts", - "type": "MutableSequence[str]" + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" }, { "name": "retry", @@ -22295,22 +22289,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.AddContextChildrenResponse", - "shortName": "add_context_children" + "resultType": "google.cloud.aiplatform_v1.types.CountTokensResponse", + "shortName": "count_tokens" }, - "description": "Sample for AddContextChildren", - "file": "aiplatform_v1_generated_metadata_service_add_context_children_sync.py", + "description": "Sample for CountTokens", + "file": "aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_AddContextChildren_sync", + "regionTag": "aiplatform_v1_generated_LlmUtilityService_CountTokens_sync", "segments": [ { - "end": 51, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 60, "start": 27, "type": "SHORT" }, @@ -22320,52 +22314,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 57, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_add_context_children_sync.py" + "title": "aiplatform_v1_generated_llm_utility_service_count_tokens_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.MatchServiceAsyncClient", + "shortName": 
"MatchServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.add_execution_events", + "fullName": "google.cloud.aiplatform_v1.MatchServiceAsyncClient.find_neighbors", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents", + "fullName": "google.cloud.aiplatform.v1.MatchService.FindNeighbors", "service": { - "fullName": "google.cloud.aiplatform.v1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1.MatchService", + "shortName": "MatchService" }, - "shortName": "AddExecutionEvents" + "shortName": "FindNeighbors" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.AddExecutionEventsRequest" - }, - { - "name": "execution", - "type": "str" - }, - { - "name": "events", - "type": "MutableSequence[google.cloud.aiplatform_v1.types.Event]" + "type": "google.cloud.aiplatform_v1.types.FindNeighborsRequest" }, { "name": "retry", @@ -22380,14 +22366,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.AddExecutionEventsResponse", - "shortName": "add_execution_events" + "resultType": "google.cloud.aiplatform_v1.types.FindNeighborsResponse", + "shortName": "find_neighbors" }, - "description": "Sample for AddExecutionEvents", - "file": "aiplatform_v1_generated_metadata_service_add_execution_events_async.py", + "description": "Sample for FindNeighbors", + "file": "aiplatform_v1_generated_match_service_find_neighbors_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_AddExecutionEvents_async", + "regionTag": "aiplatform_v1_generated_MatchService_FindNeighbors_async", "segments": [ { "end": 51, @@ -22420,36 +22406,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_add_execution_events_async.py" + "title": "aiplatform_v1_generated_match_service_find_neighbors_async.py" }, { "canonical": true, 
"clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1.MatchServiceClient", + "shortName": "MatchServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.add_execution_events", + "fullName": "google.cloud.aiplatform_v1.MatchServiceClient.find_neighbors", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents", + "fullName": "google.cloud.aiplatform.v1.MatchService.FindNeighbors", "service": { - "fullName": "google.cloud.aiplatform.v1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1.MatchService", + "shortName": "MatchService" }, - "shortName": "AddExecutionEvents" + "shortName": "FindNeighbors" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.AddExecutionEventsRequest" - }, - { - "name": "execution", - "type": "str" - }, - { - "name": "events", - "type": "MutableSequence[google.cloud.aiplatform_v1.types.Event]" + "type": "google.cloud.aiplatform_v1.types.FindNeighborsRequest" }, { "name": "retry", @@ -22464,14 +22442,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.AddExecutionEventsResponse", - "shortName": "add_execution_events" + "resultType": "google.cloud.aiplatform_v1.types.FindNeighborsResponse", + "shortName": "find_neighbors" }, - "description": "Sample for AddExecutionEvents", - "file": "aiplatform_v1_generated_metadata_service_add_execution_events_sync.py", + "description": "Sample for FindNeighbors", + "file": "aiplatform_v1_generated_match_service_find_neighbors_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_AddExecutionEvents_sync", + "regionTag": "aiplatform_v1_generated_MatchService_FindNeighbors_sync", "segments": [ { "end": 51, @@ -22504,41 +22482,29 @@ "type": 
"RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_add_execution_events_sync.py" + "title": "aiplatform_v1_generated_match_service_find_neighbors_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.MatchServiceAsyncClient", + "shortName": "MatchServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_artifact", + "fullName": "google.cloud.aiplatform_v1.MatchServiceAsyncClient.read_index_datapoints", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateArtifact", + "fullName": "google.cloud.aiplatform.v1.MatchService.ReadIndexDatapoints", "service": { - "fullName": "google.cloud.aiplatform.v1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1.MatchService", + "shortName": "MatchService" }, - "shortName": "CreateArtifact" + "shortName": "ReadIndexDatapoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateArtifactRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "artifact", - "type": "google.cloud.aiplatform_v1.types.Artifact" - }, - { - "name": "artifact_id", - "type": "str" + "type": "google.cloud.aiplatform_v1.types.ReadIndexDatapointsRequest" }, { "name": "retry", @@ -22553,14 +22519,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Artifact", - "shortName": "create_artifact" + "resultType": "google.cloud.aiplatform_v1.types.ReadIndexDatapointsResponse", + "shortName": "read_index_datapoints" }, - "description": "Sample for CreateArtifact", - "file": "aiplatform_v1_generated_metadata_service_create_artifact_async.py", + "description": "Sample for ReadIndexDatapoints", + "file": 
"aiplatform_v1_generated_match_service_read_index_datapoints_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_CreateArtifact_async", + "regionTag": "aiplatform_v1_generated_MatchService_ReadIndexDatapoints_async", "segments": [ { "end": 51, @@ -22593,40 +22559,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_create_artifact_async.py" + "title": "aiplatform_v1_generated_match_service_read_index_datapoints_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1.MatchServiceClient", + "shortName": "MatchServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_artifact", + "fullName": "google.cloud.aiplatform_v1.MatchServiceClient.read_index_datapoints", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateArtifact", + "fullName": "google.cloud.aiplatform.v1.MatchService.ReadIndexDatapoints", "service": { - "fullName": "google.cloud.aiplatform.v1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1.MatchService", + "shortName": "MatchService" }, - "shortName": "CreateArtifact" + "shortName": "ReadIndexDatapoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateArtifactRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "artifact", - "type": "google.cloud.aiplatform_v1.types.Artifact" - }, - { - "name": "artifact_id", - "type": "str" + "type": "google.cloud.aiplatform_v1.types.ReadIndexDatapointsRequest" }, { "name": "retry", @@ -22641,14 +22595,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Artifact", - "shortName": "create_artifact" + "resultType": 
"google.cloud.aiplatform_v1.types.ReadIndexDatapointsResponse", + "shortName": "read_index_datapoints" }, - "description": "Sample for CreateArtifact", - "file": "aiplatform_v1_generated_metadata_service_create_artifact_sync.py", + "description": "Sample for ReadIndexDatapoints", + "file": "aiplatform_v1_generated_match_service_read_index_datapoints_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_CreateArtifact_sync", + "regionTag": "aiplatform_v1_generated_MatchService_ReadIndexDatapoints_sync", "segments": [ { "end": 51, @@ -22681,7 +22635,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_create_artifact_sync.py" + "title": "aiplatform_v1_generated_match_service_read_index_datapoints_sync.py" }, { "canonical": true, @@ -22691,31 +22645,31 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_context", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.add_context_artifacts_and_executions", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateContext", + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateContext" + "shortName": "AddContextArtifactsAndExecutions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateContextRequest" + "type": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest" }, { - "name": "parent", + "name": "context", "type": "str" }, { - "name": "context", - "type": "google.cloud.aiplatform_v1.types.Context" + "name": "artifacts", + "type": "MutableSequence[str]" }, { - "name": "context_id", - "type": "str" + "name": "executions", + 
"type": "MutableSequence[str]" }, { "name": "retry", @@ -22730,14 +22684,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Context", - "shortName": "create_context" + "resultType": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse", + "shortName": "add_context_artifacts_and_executions" }, - "description": "Sample for CreateContext", - "file": "aiplatform_v1_generated_metadata_service_create_context_async.py", + "description": "Sample for AddContextArtifactsAndExecutions", + "file": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_CreateContext_async", + "regionTag": "aiplatform_v1_generated_MetadataService_AddContextArtifactsAndExecutions_async", "segments": [ { "end": 51, @@ -22770,7 +22724,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_create_context_async.py" + "title": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_async.py" }, { "canonical": true, @@ -22779,31 +22733,31 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_context", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.add_context_artifacts_and_executions", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateContext", + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextArtifactsAndExecutions", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateContext" + "shortName": "AddContextArtifactsAndExecutions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateContextRequest" + "type": 
"google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsRequest" }, { - "name": "parent", + "name": "context", "type": "str" }, { - "name": "context", - "type": "google.cloud.aiplatform_v1.types.Context" + "name": "artifacts", + "type": "MutableSequence[str]" }, { - "name": "context_id", - "type": "str" + "name": "executions", + "type": "MutableSequence[str]" }, { "name": "retry", @@ -22818,14 +22772,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Context", - "shortName": "create_context" + "resultType": "google.cloud.aiplatform_v1.types.AddContextArtifactsAndExecutionsResponse", + "shortName": "add_context_artifacts_and_executions" }, - "description": "Sample for CreateContext", - "file": "aiplatform_v1_generated_metadata_service_create_context_sync.py", + "description": "Sample for AddContextArtifactsAndExecutions", + "file": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_CreateContext_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_AddContextArtifactsAndExecutions_sync", "segments": [ { "end": 51, @@ -22858,7 +22812,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_create_context_sync.py" + "title": "aiplatform_v1_generated_metadata_service_add_context_artifacts_and_executions_sync.py" }, { "canonical": true, @@ -22868,31 +22822,27 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_execution", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.add_context_children", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateExecution", + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextChildren", "service": { 
"fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateExecution" + "shortName": "AddContextChildren" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateExecutionRequest" + "type": "google.cloud.aiplatform_v1.types.AddContextChildrenRequest" }, { - "name": "parent", + "name": "context", "type": "str" }, { - "name": "execution", - "type": "google.cloud.aiplatform_v1.types.Execution" - }, - { - "name": "execution_id", - "type": "str" + "name": "child_contexts", + "type": "MutableSequence[str]" }, { "name": "retry", @@ -22907,14 +22857,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Execution", - "shortName": "create_execution" + "resultType": "google.cloud.aiplatform_v1.types.AddContextChildrenResponse", + "shortName": "add_context_children" }, - "description": "Sample for CreateExecution", - "file": "aiplatform_v1_generated_metadata_service_create_execution_async.py", + "description": "Sample for AddContextChildren", + "file": "aiplatform_v1_generated_metadata_service_add_context_children_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_CreateExecution_async", + "regionTag": "aiplatform_v1_generated_MetadataService_AddContextChildren_async", "segments": [ { "end": 51, @@ -22947,7 +22897,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_create_execution_async.py" + "title": "aiplatform_v1_generated_metadata_service_add_context_children_async.py" }, { "canonical": true, @@ -22956,31 +22906,27 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_execution", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.add_context_children", "method": { - "fullName": 
"google.cloud.aiplatform.v1.MetadataService.CreateExecution", + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddContextChildren", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateExecution" + "shortName": "AddContextChildren" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateExecutionRequest" + "type": "google.cloud.aiplatform_v1.types.AddContextChildrenRequest" }, { - "name": "parent", + "name": "context", "type": "str" }, { - "name": "execution", - "type": "google.cloud.aiplatform_v1.types.Execution" - }, - { - "name": "execution_id", - "type": "str" + "name": "child_contexts", + "type": "MutableSequence[str]" }, { "name": "retry", @@ -22995,14 +22941,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Execution", - "shortName": "create_execution" + "resultType": "google.cloud.aiplatform_v1.types.AddContextChildrenResponse", + "shortName": "add_context_children" }, - "description": "Sample for CreateExecution", - "file": "aiplatform_v1_generated_metadata_service_create_execution_sync.py", + "description": "Sample for AddContextChildren", + "file": "aiplatform_v1_generated_metadata_service_add_context_children_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_CreateExecution_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_AddContextChildren_sync", "segments": [ { "end": 51, @@ -23035,7 +22981,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_create_execution_sync.py" + "title": "aiplatform_v1_generated_metadata_service_add_context_children_sync.py" }, { "canonical": true, @@ -23045,31 +22991,27 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_metadata_schema", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.add_execution_events", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema", + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateMetadataSchema" + "shortName": "AddExecutionEvents" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest" + "type": "google.cloud.aiplatform_v1.types.AddExecutionEventsRequest" }, { - "name": "parent", + "name": "execution", "type": "str" }, { - "name": "metadata_schema", - "type": "google.cloud.aiplatform_v1.types.MetadataSchema" - }, - { - "name": "metadata_schema_id", - "type": "str" + "name": "events", + "type": "MutableSequence[google.cloud.aiplatform_v1.types.Event]" }, { "name": "retry", @@ -23084,22 +23026,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", - "shortName": "create_metadata_schema" + "resultType": "google.cloud.aiplatform_v1.types.AddExecutionEventsResponse", + "shortName": "add_execution_events" }, - "description": "Sample for CreateMetadataSchema", - "file": "aiplatform_v1_generated_metadata_service_create_metadata_schema_async.py", + "description": "Sample for AddExecutionEvents", + "file": "aiplatform_v1_generated_metadata_service_add_execution_events_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataSchema_async", + "regionTag": "aiplatform_v1_generated_MetadataService_AddExecutionEvents_async", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -23109,22 +23051,22 @@ "type": 
"CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_create_metadata_schema_async.py" + "title": "aiplatform_v1_generated_metadata_service_add_execution_events_async.py" }, { "canonical": true, @@ -23133,31 +23075,27 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_metadata_schema", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.add_execution_events", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema", + "fullName": "google.cloud.aiplatform.v1.MetadataService.AddExecutionEvents", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateMetadataSchema" + "shortName": "AddExecutionEvents" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest" + "type": "google.cloud.aiplatform_v1.types.AddExecutionEventsRequest" }, { - "name": "parent", + "name": "execution", "type": "str" }, { - "name": "metadata_schema", - "type": "google.cloud.aiplatform_v1.types.MetadataSchema" - }, - { - "name": "metadata_schema_id", - "type": "str" + "name": "events", + "type": "MutableSequence[google.cloud.aiplatform_v1.types.Event]" }, { "name": "retry", @@ -23172,22 +23110,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", - "shortName": "create_metadata_schema" + "resultType": "google.cloud.aiplatform_v1.types.AddExecutionEventsResponse", + "shortName": "add_execution_events" }, - "description": "Sample for CreateMetadataSchema", - "file": 
"aiplatform_v1_generated_metadata_service_create_metadata_schema_sync.py", + "description": "Sample for AddExecutionEvents", + "file": "aiplatform_v1_generated_metadata_service_add_execution_events_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataSchema_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_AddExecutionEvents_sync", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -23197,22 +23135,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_create_metadata_schema_sync.py" + "title": "aiplatform_v1_generated_metadata_service_add_execution_events_sync.py" }, { "canonical": true, @@ -23222,30 +23160,30 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_metadata_store", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore", + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateArtifact", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateMetadataStore" + "shortName": "CreateArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest" + "type": "google.cloud.aiplatform_v1.types.CreateArtifactRequest" }, { "name": "parent", "type": "str" }, { - "name": "metadata_store", - "type": 
"google.cloud.aiplatform_v1.types.MetadataStore" + "name": "artifact", + "type": "google.cloud.aiplatform_v1.types.Artifact" }, { - "name": "metadata_store_id", + "name": "artifact_id", "type": "str" }, { @@ -23261,22 +23199,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_metadata_store" + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "create_artifact" }, - "description": "Sample for CreateMetadataStore", - "file": "aiplatform_v1_generated_metadata_service_create_metadata_store_async.py", + "description": "Sample for CreateArtifact", + "file": "aiplatform_v1_generated_metadata_service_create_artifact_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataStore_async", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateArtifact_async", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -23291,17 +23229,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_create_metadata_store_async.py" + "title": "aiplatform_v1_generated_metadata_service_create_artifact_async.py" }, { "canonical": true, @@ -23310,30 +23248,30 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_metadata_store", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore", + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateArtifact", "service": { "fullName": 
"google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateMetadataStore" + "shortName": "CreateArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest" + "type": "google.cloud.aiplatform_v1.types.CreateArtifactRequest" }, { "name": "parent", "type": "str" }, { - "name": "metadata_store", - "type": "google.cloud.aiplatform_v1.types.MetadataStore" + "name": "artifact", + "type": "google.cloud.aiplatform_v1.types.Artifact" }, { - "name": "metadata_store_id", + "name": "artifact_id", "type": "str" }, { @@ -23349,22 +23287,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_metadata_store" + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "create_artifact" }, - "description": "Sample for CreateMetadataStore", - "file": "aiplatform_v1_generated_metadata_service_create_metadata_store_sync.py", + "description": "Sample for CreateArtifact", + "file": "aiplatform_v1_generated_metadata_service_create_artifact_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataStore_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateArtifact_sync", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -23379,17 +23317,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_create_metadata_store_sync.py" + "title": "aiplatform_v1_generated_metadata_service_create_artifact_sync.py" }, { "canonical": true, @@ -23399,22 +23337,30 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": 
"MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_artifact", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_context", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteArtifact", + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateContext", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteArtifact" + "shortName": "CreateContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteArtifactRequest" + "type": "google.cloud.aiplatform_v1.types.CreateContextRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1.types.Context" + }, + { + "name": "context_id", "type": "str" }, { @@ -23430,22 +23376,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_artifact" + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "create_context" }, - "description": "Sample for DeleteArtifact", - "file": "aiplatform_v1_generated_metadata_service_delete_artifact_async.py", + "description": "Sample for CreateContext", + "file": "aiplatform_v1_generated_metadata_service_create_context_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_DeleteArtifact_async", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateContext_async", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -23460,17 +23406,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1_generated_metadata_service_delete_artifact_async.py" + "title": "aiplatform_v1_generated_metadata_service_create_context_async.py" }, { "canonical": true, @@ -23479,22 +23425,30 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_artifact", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_context", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteArtifact", + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateContext", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteArtifact" + "shortName": "CreateContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteArtifactRequest" + "type": "google.cloud.aiplatform_v1.types.CreateContextRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1.types.Context" + }, + { + "name": "context_id", "type": "str" }, { @@ -23510,22 +23464,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_artifact" + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "create_context" }, - "description": "Sample for DeleteArtifact", - "file": "aiplatform_v1_generated_metadata_service_delete_artifact_sync.py", + "description": "Sample for CreateContext", + "file": "aiplatform_v1_generated_metadata_service_create_context_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_DeleteArtifact_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateContext_sync", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ 
-23540,17 +23494,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_delete_artifact_sync.py" + "title": "aiplatform_v1_generated_metadata_service_create_context_sync.py" }, { "canonical": true, @@ -23560,22 +23514,30 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_context", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_execution", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteContext", + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateExecution", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteContext" + "shortName": "CreateExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteContextRequest" + "type": "google.cloud.aiplatform_v1.types.CreateExecutionRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1.types.Execution" + }, + { + "name": "execution_id", "type": "str" }, { @@ -23591,22 +23553,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_context" + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "create_execution" }, - "description": "Sample for DeleteContext", - "file": "aiplatform_v1_generated_metadata_service_delete_context_async.py", + "description": "Sample for CreateExecution", + "file": "aiplatform_v1_generated_metadata_service_create_execution_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_MetadataService_DeleteContext_async", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateExecution_async", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -23621,17 +23583,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_delete_context_async.py" + "title": "aiplatform_v1_generated_metadata_service_create_execution_async.py" }, { "canonical": true, @@ -23640,22 +23602,30 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_context", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_execution", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteContext", + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateExecution", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteContext" + "shortName": "CreateExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteContextRequest" + "type": "google.cloud.aiplatform_v1.types.CreateExecutionRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1.types.Execution" + }, + { + "name": "execution_id", "type": "str" }, { @@ -23671,22 +23641,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_context" + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "create_execution" }, - "description": "Sample for DeleteContext", - "file": 
"aiplatform_v1_generated_metadata_service_delete_context_sync.py", + "description": "Sample for CreateExecution", + "file": "aiplatform_v1_generated_metadata_service_create_execution_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_DeleteContext_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateExecution_sync", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -23701,17 +23671,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_delete_context_sync.py" + "title": "aiplatform_v1_generated_metadata_service_create_execution_sync.py" }, { "canonical": true, @@ -23721,22 +23691,30 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_execution", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_metadata_schema", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteExecution", + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteExecution" + "shortName": "CreateMetadataSchema" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteExecutionRequest" + "type": "google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "metadata_schema", + "type": "google.cloud.aiplatform_v1.types.MetadataSchema" + }, + { + "name": "metadata_schema_id", "type": "str" }, 
{ @@ -23752,14 +23730,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_execution" + "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", + "shortName": "create_metadata_schema" }, - "description": "Sample for DeleteExecution", - "file": "aiplatform_v1_generated_metadata_service_delete_execution_async.py", + "description": "Sample for CreateMetadataSchema", + "file": "aiplatform_v1_generated_metadata_service_create_metadata_schema_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_DeleteExecution_async", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataSchema_async", "segments": [ { "end": 55, @@ -23777,13 +23755,13 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { "end": 52, - "start": 46, + "start": 50, "type": "REQUEST_EXECUTION" }, { @@ -23792,7 +23770,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_delete_execution_async.py" + "title": "aiplatform_v1_generated_metadata_service_create_metadata_schema_async.py" }, { "canonical": true, @@ -23801,22 +23779,30 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_execution", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_metadata_schema", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteExecution", + "fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataSchema", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteExecution" + "shortName": "CreateMetadataSchema" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.DeleteExecutionRequest" + "type": "google.cloud.aiplatform_v1.types.CreateMetadataSchemaRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "metadata_schema", + "type": "google.cloud.aiplatform_v1.types.MetadataSchema" + }, + { + "name": "metadata_schema_id", "type": "str" }, { @@ -23832,14 +23818,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_execution" + "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", + "shortName": "create_metadata_schema" }, - "description": "Sample for DeleteExecution", - "file": "aiplatform_v1_generated_metadata_service_delete_execution_sync.py", + "description": "Sample for CreateMetadataSchema", + "file": "aiplatform_v1_generated_metadata_service_create_metadata_schema_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_DeleteExecution_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataSchema_sync", "segments": [ { "end": 55, @@ -23857,13 +23843,13 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { "end": 52, - "start": 46, + "start": 50, "type": "REQUEST_EXECUTION" }, { @@ -23872,7 +23858,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_delete_execution_sync.py" + "title": "aiplatform_v1_generated_metadata_service_create_metadata_schema_sync.py" }, { "canonical": true, @@ -23882,22 +23868,30 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_metadata_store", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.create_metadata_store", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore", + 
"fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteMetadataStore" + "shortName": "CreateMetadataStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest" + "type": "google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "metadata_store", + "type": "google.cloud.aiplatform_v1.types.MetadataStore" + }, + { + "name": "metadata_store_id", "type": "str" }, { @@ -23914,13 +23908,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_metadata_store" + "shortName": "create_metadata_store" }, - "description": "Sample for DeleteMetadataStore", - "file": "aiplatform_v1_generated_metadata_service_delete_metadata_store_async.py", + "description": "Sample for CreateMetadataStore", + "file": "aiplatform_v1_generated_metadata_service_create_metadata_store_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_DeleteMetadataStore_async", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataStore_async", "segments": [ { "end": 55, @@ -23953,7 +23947,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_delete_metadata_store_async.py" + "title": "aiplatform_v1_generated_metadata_service_create_metadata_store_async.py" }, { "canonical": true, @@ -23962,22 +23956,30 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_metadata_store", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.create_metadata_store", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore", + 
"fullName": "google.cloud.aiplatform.v1.MetadataService.CreateMetadataStore", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteMetadataStore" + "shortName": "CreateMetadataStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest" + "type": "google.cloud.aiplatform_v1.types.CreateMetadataStoreRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "metadata_store", + "type": "google.cloud.aiplatform_v1.types.MetadataStore" + }, + { + "name": "metadata_store_id", "type": "str" }, { @@ -23994,13 +23996,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_metadata_store" + "shortName": "create_metadata_store" }, - "description": "Sample for DeleteMetadataStore", - "file": "aiplatform_v1_generated_metadata_service_delete_metadata_store_sync.py", + "description": "Sample for CreateMetadataStore", + "file": "aiplatform_v1_generated_metadata_service_create_metadata_store_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_DeleteMetadataStore_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_CreateMetadataStore_sync", "segments": [ { "end": 55, @@ -24033,7 +24035,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_delete_metadata_store_sync.py" + "title": "aiplatform_v1_generated_metadata_service_create_metadata_store_sync.py" }, { "canonical": true, @@ -24043,19 +24045,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_artifact", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.GetArtifact", + "fullName": 
"google.cloud.aiplatform.v1.MetadataService.DeleteArtifact", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "GetArtifact" + "shortName": "DeleteArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetArtifactRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteArtifactRequest" }, { "name": "name", @@ -24074,22 +24076,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Artifact", - "shortName": "get_artifact" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_artifact" }, - "description": "Sample for GetArtifact", - "file": "aiplatform_v1_generated_metadata_service_get_artifact_async.py", + "description": "Sample for DeleteArtifact", + "file": "aiplatform_v1_generated_metadata_service_delete_artifact_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_GetArtifact_async", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteArtifact_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -24104,17 +24106,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_get_artifact_async.py" + "title": "aiplatform_v1_generated_metadata_service_delete_artifact_async.py" }, { "canonical": true, @@ -24123,19 +24125,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_artifact", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_artifact", "method": { - "fullName": 
"google.cloud.aiplatform.v1.MetadataService.GetArtifact", + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteArtifact", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "GetArtifact" + "shortName": "DeleteArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetArtifactRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteArtifactRequest" }, { "name": "name", @@ -24154,22 +24156,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Artifact", - "shortName": "get_artifact" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_artifact" }, - "description": "Sample for GetArtifact", - "file": "aiplatform_v1_generated_metadata_service_get_artifact_sync.py", + "description": "Sample for DeleteArtifact", + "file": "aiplatform_v1_generated_metadata_service_delete_artifact_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_GetArtifact_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteArtifact_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -24184,17 +24186,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_get_artifact_sync.py" + "title": "aiplatform_v1_generated_metadata_service_delete_artifact_sync.py" }, { "canonical": true, @@ -24204,19 +24206,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_context", + "fullName": 
"google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_context", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.GetContext", + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteContext", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "GetContext" + "shortName": "DeleteContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetContextRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteContextRequest" }, { "name": "name", @@ -24235,22 +24237,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Context", - "shortName": "get_context" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_context" }, - "description": "Sample for GetContext", - "file": "aiplatform_v1_generated_metadata_service_get_context_async.py", + "description": "Sample for DeleteContext", + "file": "aiplatform_v1_generated_metadata_service_delete_context_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_GetContext_async", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteContext_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -24265,17 +24267,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_get_context_async.py" + "title": "aiplatform_v1_generated_metadata_service_delete_context_async.py" }, { "canonical": true, @@ -24284,19 +24286,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1.MetadataServiceClient.get_context", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_context", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.GetContext", + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteContext", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "GetContext" + "shortName": "DeleteContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetContextRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteContextRequest" }, { "name": "name", @@ -24315,22 +24317,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Context", - "shortName": "get_context" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_context" }, - "description": "Sample for GetContext", - "file": "aiplatform_v1_generated_metadata_service_get_context_sync.py", + "description": "Sample for DeleteContext", + "file": "aiplatform_v1_generated_metadata_service_delete_context_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_GetContext_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteContext_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -24345,17 +24347,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_get_context_sync.py" + "title": "aiplatform_v1_generated_metadata_service_delete_context_sync.py" }, { "canonical": true, @@ -24365,19 +24367,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": 
"MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_execution", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_execution", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.GetExecution", + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteExecution", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "GetExecution" + "shortName": "DeleteExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetExecutionRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteExecutionRequest" }, { "name": "name", @@ -24396,22 +24398,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Execution", - "shortName": "get_execution" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_execution" }, - "description": "Sample for GetExecution", - "file": "aiplatform_v1_generated_metadata_service_get_execution_async.py", + "description": "Sample for DeleteExecution", + "file": "aiplatform_v1_generated_metadata_service_delete_execution_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_GetExecution_async", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteExecution_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -24426,17 +24428,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_get_execution_async.py" + "title": "aiplatform_v1_generated_metadata_service_delete_execution_async.py" }, { "canonical": true, @@ -24445,19 
+24447,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_execution", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_execution", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.GetExecution", + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteExecution", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "GetExecution" + "shortName": "DeleteExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetExecutionRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteExecutionRequest" }, { "name": "name", @@ -24476,22 +24478,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Execution", - "shortName": "get_execution" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_execution" }, - "description": "Sample for GetExecution", - "file": "aiplatform_v1_generated_metadata_service_get_execution_sync.py", + "description": "Sample for DeleteExecution", + "file": "aiplatform_v1_generated_metadata_service_delete_execution_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_GetExecution_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteExecution_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -24506,17 +24508,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_get_execution_sync.py" + "title": 
"aiplatform_v1_generated_metadata_service_delete_execution_sync.py" }, { "canonical": true, @@ -24526,19 +24528,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_metadata_schema", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.delete_metadata_store", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema", + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "GetMetadataSchema" + "shortName": "DeleteMetadataStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest" }, { "name": "name", @@ -24557,22 +24559,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", - "shortName": "get_metadata_schema" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_metadata_store" }, - "description": "Sample for GetMetadataSchema", - "file": "aiplatform_v1_generated_metadata_service_get_metadata_schema_async.py", + "description": "Sample for DeleteMetadataStore", + "file": "aiplatform_v1_generated_metadata_service_delete_metadata_store_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataSchema_async", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteMetadataStore_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -24587,17 +24589,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - 
"end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_get_metadata_schema_async.py" + "title": "aiplatform_v1_generated_metadata_service_delete_metadata_store_async.py" }, { "canonical": true, @@ -24606,19 +24608,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_metadata_schema", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.delete_metadata_store", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema", + "fullName": "google.cloud.aiplatform.v1.MetadataService.DeleteMetadataStore", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "GetMetadataSchema" + "shortName": "DeleteMetadataStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteMetadataStoreRequest" }, { "name": "name", @@ -24637,22 +24639,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", - "shortName": "get_metadata_schema" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_metadata_store" }, - "description": "Sample for GetMetadataSchema", - "file": "aiplatform_v1_generated_metadata_service_get_metadata_schema_sync.py", + "description": "Sample for DeleteMetadataStore", + "file": "aiplatform_v1_generated_metadata_service_delete_metadata_store_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataSchema_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_DeleteMetadataStore_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, 
"type": "SHORT" }, @@ -24667,17 +24669,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_get_metadata_schema_sync.py" + "title": "aiplatform_v1_generated_metadata_service_delete_metadata_store_sync.py" }, { "canonical": true, @@ -24687,19 +24689,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_metadata_store", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataStore", + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetArtifact", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "GetMetadataStore" + "shortName": "GetArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetMetadataStoreRequest" + "type": "google.cloud.aiplatform_v1.types.GetArtifactRequest" }, { "name": "name", @@ -24718,14 +24720,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.MetadataStore", - "shortName": "get_metadata_store" + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "get_artifact" }, - "description": "Sample for GetMetadataStore", - "file": "aiplatform_v1_generated_metadata_service_get_metadata_store_async.py", + "description": "Sample for GetArtifact", + "file": "aiplatform_v1_generated_metadata_service_get_artifact_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataStore_async", + "regionTag": "aiplatform_v1_generated_MetadataService_GetArtifact_async", 
"segments": [ { "end": 51, @@ -24758,7 +24760,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_get_metadata_store_async.py" + "title": "aiplatform_v1_generated_metadata_service_get_artifact_async.py" }, { "canonical": true, @@ -24767,19 +24769,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_metadata_store", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataStore", + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetArtifact", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "GetMetadataStore" + "shortName": "GetArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetMetadataStoreRequest" + "type": "google.cloud.aiplatform_v1.types.GetArtifactRequest" }, { "name": "name", @@ -24798,14 +24800,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.MetadataStore", - "shortName": "get_metadata_store" + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "get_artifact" }, - "description": "Sample for GetMetadataStore", - "file": "aiplatform_v1_generated_metadata_service_get_metadata_store_sync.py", + "description": "Sample for GetArtifact", + "file": "aiplatform_v1_generated_metadata_service_get_artifact_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataStore_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_GetArtifact_sync", "segments": [ { "end": 51, @@ -24838,7 +24840,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_get_metadata_store_sync.py" + "title": 
"aiplatform_v1_generated_metadata_service_get_artifact_sync.py" }, { "canonical": true, @@ -24848,22 +24850,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_artifacts", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_context", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.ListArtifacts", + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetContext", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "ListArtifacts" + "shortName": "GetContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListArtifactsRequest" + "type": "google.cloud.aiplatform_v1.types.GetContextRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -24879,22 +24881,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsAsyncPager", - "shortName": "list_artifacts" + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "get_context" }, - "description": "Sample for ListArtifacts", - "file": "aiplatform_v1_generated_metadata_service_list_artifacts_async.py", + "description": "Sample for GetContext", + "file": "aiplatform_v1_generated_metadata_service_get_context_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_ListArtifacts_async", + "regionTag": "aiplatform_v1_generated_MetadataService_GetContext_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -24914,12 +24916,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1_generated_metadata_service_list_artifacts_async.py" + "title": "aiplatform_v1_generated_metadata_service_get_context_async.py" }, { "canonical": true, @@ -24928,22 +24930,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_artifacts", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_context", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.ListArtifacts", + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetContext", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "ListArtifacts" + "shortName": "GetContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListArtifactsRequest" + "type": "google.cloud.aiplatform_v1.types.GetContextRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -24959,22 +24961,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsPager", - "shortName": "list_artifacts" + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "get_context" }, - "description": "Sample for ListArtifacts", - "file": "aiplatform_v1_generated_metadata_service_list_artifacts_sync.py", + "description": "Sample for GetContext", + "file": "aiplatform_v1_generated_metadata_service_get_context_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_ListArtifacts_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_GetContext_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -24994,12 +24996,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - 
"title": "aiplatform_v1_generated_metadata_service_list_artifacts_sync.py" + "title": "aiplatform_v1_generated_metadata_service_get_context_sync.py" }, { "canonical": true, @@ -25009,22 +25011,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_contexts", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_execution", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.ListContexts", + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetExecution", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "ListContexts" + "shortName": "GetExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListContextsRequest" + "type": "google.cloud.aiplatform_v1.types.GetExecutionRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25040,22 +25042,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsAsyncPager", - "shortName": "list_contexts" + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "get_execution" }, - "description": "Sample for ListContexts", - "file": "aiplatform_v1_generated_metadata_service_list_contexts_async.py", + "description": "Sample for GetExecution", + "file": "aiplatform_v1_generated_metadata_service_get_execution_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_ListContexts_async", + "regionTag": "aiplatform_v1_generated_MetadataService_GetExecution_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25075,12 +25077,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, 
"start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_list_contexts_async.py" + "title": "aiplatform_v1_generated_metadata_service_get_execution_async.py" }, { "canonical": true, @@ -25089,22 +25091,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_contexts", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_execution", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.ListContexts", + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetExecution", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "ListContexts" + "shortName": "GetExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListContextsRequest" + "type": "google.cloud.aiplatform_v1.types.GetExecutionRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25120,22 +25122,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsPager", - "shortName": "list_contexts" + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "get_execution" }, - "description": "Sample for ListContexts", - "file": "aiplatform_v1_generated_metadata_service_list_contexts_sync.py", + "description": "Sample for GetExecution", + "file": "aiplatform_v1_generated_metadata_service_get_execution_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_ListContexts_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_GetExecution_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25155,12 +25157,12 @@ "type": "REQUEST_EXECUTION" }, { - 
"end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_list_contexts_sync.py" + "title": "aiplatform_v1_generated_metadata_service_get_execution_sync.py" }, { "canonical": true, @@ -25170,22 +25172,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_executions", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_metadata_schema", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.ListExecutions", + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "ListExecutions" + "shortName": "GetMetadataSchema" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListExecutionsRequest" + "type": "google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25201,22 +25203,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsAsyncPager", - "shortName": "list_executions" + "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", + "shortName": "get_metadata_schema" }, - "description": "Sample for ListExecutions", - "file": "aiplatform_v1_generated_metadata_service_list_executions_async.py", + "description": "Sample for GetMetadataSchema", + "file": "aiplatform_v1_generated_metadata_service_get_metadata_schema_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_ListExecutions_async", + "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataSchema_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - 
"end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25236,12 +25238,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_list_executions_async.py" + "title": "aiplatform_v1_generated_metadata_service_get_metadata_schema_async.py" }, { "canonical": true, @@ -25250,22 +25252,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_executions", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_metadata_schema", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.ListExecutions", + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataSchema", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "ListExecutions" + "shortName": "GetMetadataSchema" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListExecutionsRequest" + "type": "google.cloud.aiplatform_v1.types.GetMetadataSchemaRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25281,22 +25283,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsPager", - "shortName": "list_executions" + "resultType": "google.cloud.aiplatform_v1.types.MetadataSchema", + "shortName": "get_metadata_schema" }, - "description": "Sample for ListExecutions", - "file": "aiplatform_v1_generated_metadata_service_list_executions_sync.py", + "description": "Sample for GetMetadataSchema", + "file": "aiplatform_v1_generated_metadata_service_get_metadata_schema_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_ListExecutions_sync", + "regionTag": 
"aiplatform_v1_generated_MetadataService_GetMetadataSchema_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25316,12 +25318,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_list_executions_sync.py" + "title": "aiplatform_v1_generated_metadata_service_get_metadata_schema_sync.py" }, { "canonical": true, @@ -25331,22 +25333,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_metadata_schemas", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.get_metadata_store", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas", + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataStore", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "ListMetadataSchemas" + "shortName": "GetMetadataStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest" + "type": "google.cloud.aiplatform_v1.types.GetMetadataStoreRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25362,22 +25364,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager", - "shortName": "list_metadata_schemas" + "resultType": "google.cloud.aiplatform_v1.types.MetadataStore", + "shortName": "get_metadata_store" }, - "description": "Sample for ListMetadataSchemas", - "file": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_async.py", + "description": "Sample for GetMetadataStore", + "file": 
"aiplatform_v1_generated_metadata_service_get_metadata_store_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataSchemas_async", + "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataStore_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25397,12 +25399,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_async.py" + "title": "aiplatform_v1_generated_metadata_service_get_metadata_store_async.py" }, { "canonical": true, @@ -25411,22 +25413,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_metadata_schemas", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.get_metadata_store", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas", + "fullName": "google.cloud.aiplatform.v1.MetadataService.GetMetadataStore", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "ListMetadataSchemas" + "shortName": "GetMetadataStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest" + "type": "google.cloud.aiplatform_v1.types.GetMetadataStoreRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25442,22 +25444,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasPager", - "shortName": "list_metadata_schemas" + "resultType": "google.cloud.aiplatform_v1.types.MetadataStore", + "shortName": "get_metadata_store" }, - "description": "Sample for ListMetadataSchemas", - 
"file": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_sync.py", + "description": "Sample for GetMetadataStore", + "file": "aiplatform_v1_generated_metadata_service_get_metadata_store_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataSchemas_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_GetMetadataStore_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25477,12 +25479,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_sync.py" + "title": "aiplatform_v1_generated_metadata_service_get_metadata_store_sync.py" }, { "canonical": true, @@ -25492,19 +25494,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_metadata_stores", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_artifacts", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataStores", + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListArtifacts", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "ListMetadataStores" + "shortName": "ListArtifacts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListMetadataStoresRequest" + "type": "google.cloud.aiplatform_v1.types.ListArtifactsRequest" }, { "name": "parent", @@ -25523,14 +25525,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresAsyncPager", - "shortName": "list_metadata_stores" + "resultType": 
"google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsAsyncPager", + "shortName": "list_artifacts" }, - "description": "Sample for ListMetadataStores", - "file": "aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py", + "description": "Sample for ListArtifacts", + "file": "aiplatform_v1_generated_metadata_service_list_artifacts_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataStores_async", + "regionTag": "aiplatform_v1_generated_MetadataService_ListArtifacts_async", "segments": [ { "end": 52, @@ -25563,7 +25565,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py" + "title": "aiplatform_v1_generated_metadata_service_list_artifacts_async.py" }, { "canonical": true, @@ -25572,19 +25574,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_metadata_stores", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_artifacts", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataStores", + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListArtifacts", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "ListMetadataStores" + "shortName": "ListArtifacts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListMetadataStoresRequest" + "type": "google.cloud.aiplatform_v1.types.ListArtifactsRequest" }, { "name": "parent", @@ -25603,14 +25605,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresPager", - "shortName": "list_metadata_stores" + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListArtifactsPager", 
+ "shortName": "list_artifacts" }, - "description": "Sample for ListMetadataStores", - "file": "aiplatform_v1_generated_metadata_service_list_metadata_stores_sync.py", + "description": "Sample for ListArtifacts", + "file": "aiplatform_v1_generated_metadata_service_list_artifacts_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataStores_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_ListArtifacts_sync", "segments": [ { "end": 52, @@ -25643,7 +25645,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_list_metadata_stores_sync.py" + "title": "aiplatform_v1_generated_metadata_service_list_artifacts_sync.py" }, { "canonical": true, @@ -25653,19 +25655,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.purge_artifacts", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_contexts", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts", + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListContexts", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "PurgeArtifacts" + "shortName": "ListContexts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.PurgeArtifactsRequest" + "type": "google.cloud.aiplatform_v1.types.ListContextsRequest" }, { "name": "parent", @@ -25684,22 +25686,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "purge_artifacts" + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsAsyncPager", + "shortName": "list_contexts" }, - "description": "Sample for PurgeArtifacts", - "file": 
"aiplatform_v1_generated_metadata_service_purge_artifacts_async.py", + "description": "Sample for ListContexts", + "file": "aiplatform_v1_generated_metadata_service_list_contexts_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_PurgeArtifacts_async", + "regionTag": "aiplatform_v1_generated_MetadataService_ListContexts_async", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -25709,22 +25711,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_purge_artifacts_async.py" + "title": "aiplatform_v1_generated_metadata_service_list_contexts_async.py" }, { "canonical": true, @@ -25733,19 +25735,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.purge_artifacts", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_contexts", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts", + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListContexts", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "PurgeArtifacts" + "shortName": "ListContexts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.PurgeArtifactsRequest" + "type": "google.cloud.aiplatform_v1.types.ListContextsRequest" }, { "name": "parent", @@ -25764,22 +25766,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "purge_artifacts" + 
"resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListContextsPager", + "shortName": "list_contexts" }, - "description": "Sample for PurgeArtifacts", - "file": "aiplatform_v1_generated_metadata_service_purge_artifacts_sync.py", + "description": "Sample for ListContexts", + "file": "aiplatform_v1_generated_metadata_service_list_contexts_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_PurgeArtifacts_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_ListContexts_sync", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -25789,22 +25791,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_purge_artifacts_sync.py" + "title": "aiplatform_v1_generated_metadata_service_list_contexts_sync.py" }, { "canonical": true, @@ -25814,19 +25816,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.purge_contexts", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_executions", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeContexts", + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListExecutions", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "PurgeContexts" + "shortName": "ListExecutions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.PurgeContextsRequest" + "type": 
"google.cloud.aiplatform_v1.types.ListExecutionsRequest" }, { "name": "parent", @@ -25845,22 +25847,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "purge_contexts" + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsAsyncPager", + "shortName": "list_executions" }, - "description": "Sample for PurgeContexts", - "file": "aiplatform_v1_generated_metadata_service_purge_contexts_async.py", + "description": "Sample for ListExecutions", + "file": "aiplatform_v1_generated_metadata_service_list_executions_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_PurgeContexts_async", + "regionTag": "aiplatform_v1_generated_MetadataService_ListExecutions_async", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -25870,22 +25872,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_purge_contexts_async.py" + "title": "aiplatform_v1_generated_metadata_service_list_executions_async.py" }, { "canonical": true, @@ -25894,19 +25896,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.purge_contexts", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_executions", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeContexts", + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListExecutions", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", 
"shortName": "MetadataService" }, - "shortName": "PurgeContexts" + "shortName": "ListExecutions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.PurgeContextsRequest" + "type": "google.cloud.aiplatform_v1.types.ListExecutionsRequest" }, { "name": "parent", @@ -25925,22 +25927,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "purge_contexts" + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListExecutionsPager", + "shortName": "list_executions" }, - "description": "Sample for PurgeContexts", - "file": "aiplatform_v1_generated_metadata_service_purge_contexts_sync.py", + "description": "Sample for ListExecutions", + "file": "aiplatform_v1_generated_metadata_service_list_executions_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_PurgeContexts_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_ListExecutions_sync", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -25950,22 +25952,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_purge_contexts_sync.py" + "title": "aiplatform_v1_generated_metadata_service_list_executions_sync.py" }, { "canonical": true, @@ -25975,19 +25977,19 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.purge_executions", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_metadata_schemas", "method": { - 
"fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeExecutions", + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "PurgeExecutions" + "shortName": "ListMetadataSchemas" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.PurgeExecutionsRequest" + "type": "google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest" }, { "name": "parent", @@ -26006,22 +26008,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "purge_executions" + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager", + "shortName": "list_metadata_schemas" }, - "description": "Sample for PurgeExecutions", - "file": "aiplatform_v1_generated_metadata_service_purge_executions_async.py", + "description": "Sample for ListMetadataSchemas", + "file": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_PurgeExecutions_async", + "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataSchemas_async", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26031,22 +26033,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_purge_executions_async.py" + "title": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_async.py" }, { "canonical": true, @@ -26055,19 +26057,19 @@ 
"fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.purge_executions", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_metadata_schemas", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeExecutions", + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataSchemas", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "PurgeExecutions" + "shortName": "ListMetadataSchemas" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.PurgeExecutionsRequest" + "type": "google.cloud.aiplatform_v1.types.ListMetadataSchemasRequest" }, { "name": "parent", @@ -26086,22 +26088,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "purge_executions" + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataSchemasPager", + "shortName": "list_metadata_schemas" }, - "description": "Sample for PurgeExecutions", - "file": "aiplatform_v1_generated_metadata_service_purge_executions_sync.py", + "description": "Sample for ListMetadataSchemas", + "file": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_PurgeExecutions_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataSchemas_sync", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26111,22 +26113,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + 
"start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_purge_executions_sync.py" + "title": "aiplatform_v1_generated_metadata_service_list_metadata_schemas_sync.py" }, { "canonical": true, @@ -26136,22 +26138,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.query_artifact_lineage_subgraph", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.list_metadata_stores", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph", + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataStores", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "QueryArtifactLineageSubgraph" + "shortName": "ListMetadataStores" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest" + "type": "google.cloud.aiplatform_v1.types.ListMetadataStoresRequest" }, { - "name": "artifact", + "name": "parent", "type": "str" }, { @@ -26167,22 +26169,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", - "shortName": "query_artifact_lineage_subgraph" + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresAsyncPager", + "shortName": "list_metadata_stores" }, - "description": "Sample for QueryArtifactLineageSubgraph", - "file": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_async.py", + "description": "Sample for ListMetadataStores", + "file": "aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_QueryArtifactLineageSubgraph_async", + "regionTag": 
"aiplatform_v1_generated_MetadataService_ListMetadataStores_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26202,12 +26204,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_async.py" + "title": "aiplatform_v1_generated_metadata_service_list_metadata_stores_async.py" }, { "canonical": true, @@ -26216,22 +26218,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.query_artifact_lineage_subgraph", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.list_metadata_stores", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph", + "fullName": "google.cloud.aiplatform.v1.MetadataService.ListMetadataStores", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "QueryArtifactLineageSubgraph" + "shortName": "ListMetadataStores" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest" + "type": "google.cloud.aiplatform_v1.types.ListMetadataStoresRequest" }, { - "name": "artifact", + "name": "parent", "type": "str" }, { @@ -26247,22 +26249,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", - "shortName": "query_artifact_lineage_subgraph" + "resultType": "google.cloud.aiplatform_v1.services.metadata_service.pagers.ListMetadataStoresPager", + "shortName": "list_metadata_stores" }, - "description": "Sample for QueryArtifactLineageSubgraph", - "file": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py", + "description": "Sample for 
ListMetadataStores", + "file": "aiplatform_v1_generated_metadata_service_list_metadata_stores_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_QueryArtifactLineageSubgraph_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_ListMetadataStores_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26282,12 +26284,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py" + "title": "aiplatform_v1_generated_metadata_service_list_metadata_stores_sync.py" }, { "canonical": true, @@ -26297,22 +26299,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.query_context_lineage_subgraph", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.purge_artifacts", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph", + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "QueryContextLineageSubgraph" + "shortName": "PurgeArtifacts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest" + "type": "google.cloud.aiplatform_v1.types.PurgeArtifactsRequest" }, { - "name": "context", + "name": "parent", "type": "str" }, { @@ -26328,22 +26330,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", - "shortName": "query_context_lineage_subgraph" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": 
"purge_artifacts" }, - "description": "Sample for QueryContextLineageSubgraph", - "file": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_async.py", + "description": "Sample for PurgeArtifacts", + "file": "aiplatform_v1_generated_metadata_service_purge_artifacts_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_QueryContextLineageSubgraph_async", + "regionTag": "aiplatform_v1_generated_MetadataService_PurgeArtifacts_async", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -26353,22 +26355,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_async.py" + "title": "aiplatform_v1_generated_metadata_service_purge_artifacts_async.py" }, { "canonical": true, @@ -26377,22 +26379,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.query_context_lineage_subgraph", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.purge_artifacts", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph", + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeArtifacts", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "QueryContextLineageSubgraph" + "shortName": "PurgeArtifacts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest" + "type": 
"google.cloud.aiplatform_v1.types.PurgeArtifactsRequest" }, { - "name": "context", + "name": "parent", "type": "str" }, { @@ -26408,22 +26410,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", - "shortName": "query_context_lineage_subgraph" + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_artifacts" }, - "description": "Sample for QueryContextLineageSubgraph", - "file": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_sync.py", + "description": "Sample for PurgeArtifacts", + "file": "aiplatform_v1_generated_metadata_service_purge_artifacts_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_QueryContextLineageSubgraph_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_PurgeArtifacts_sync", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -26433,22 +26435,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_sync.py" + "title": "aiplatform_v1_generated_metadata_service_purge_artifacts_sync.py" }, { "canonical": true, @@ -26458,22 +26460,22 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.query_execution_inputs_and_outputs", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.purge_contexts", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs", + "fullName": 
"google.cloud.aiplatform.v1.MetadataService.PurgeContexts", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "QueryExecutionInputsAndOutputs" + "shortName": "PurgeContexts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest" + "type": "google.cloud.aiplatform_v1.types.PurgeContextsRequest" }, { - "name": "execution", + "name": "parent", "type": "str" }, { @@ -26489,22 +26491,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", - "shortName": "query_execution_inputs_and_outputs" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_contexts" }, - "description": "Sample for QueryExecutionInputsAndOutputs", - "file": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_async.py", + "description": "Sample for PurgeContexts", + "file": "aiplatform_v1_generated_metadata_service_purge_contexts_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_QueryExecutionInputsAndOutputs_async", + "regionTag": "aiplatform_v1_generated_MetadataService_PurgeContexts_async", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -26514,22 +26516,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_async.py" + "title": "aiplatform_v1_generated_metadata_service_purge_contexts_async.py" }, { "canonical": true, @@ -26538,22 +26540,22 @@ "fullName": 
"google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.query_execution_inputs_and_outputs", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.purge_contexts", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs", + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeContexts", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "QueryExecutionInputsAndOutputs" + "shortName": "PurgeContexts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest" + "type": "google.cloud.aiplatform_v1.types.PurgeContextsRequest" }, { - "name": "execution", + "name": "parent", "type": "str" }, { @@ -26569,22 +26571,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", - "shortName": "query_execution_inputs_and_outputs" + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_contexts" }, - "description": "Sample for QueryExecutionInputsAndOutputs", - "file": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py", + "description": "Sample for PurgeContexts", + "file": "aiplatform_v1_generated_metadata_service_purge_contexts_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_QueryExecutionInputsAndOutputs_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_PurgeContexts_sync", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -26594,22 +26596,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, 
"type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py" + "title": "aiplatform_v1_generated_metadata_service_purge_contexts_sync.py" }, { "canonical": true, @@ -26619,28 +26621,24 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.remove_context_children", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.purge_executions", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.RemoveContextChildren", + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeExecutions", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "RemoveContextChildren" + "shortName": "PurgeExecutions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.RemoveContextChildrenRequest" + "type": "google.cloud.aiplatform_v1.types.PurgeExecutionsRequest" }, { - "name": "context", + "name": "parent", "type": "str" }, - { - "name": "child_contexts", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -26654,22 +26652,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.RemoveContextChildrenResponse", - "shortName": "remove_context_children" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_executions" }, - "description": "Sample for RemoveContextChildren", - "file": "aiplatform_v1_generated_metadata_service_remove_context_children_async.py", + "description": "Sample for PurgeExecutions", + "file": "aiplatform_v1_generated_metadata_service_purge_executions_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_MetadataService_RemoveContextChildren_async", + "regionTag": "aiplatform_v1_generated_MetadataService_PurgeExecutions_async", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -26679,22 +26677,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_remove_context_children_async.py" + "title": "aiplatform_v1_generated_metadata_service_purge_executions_async.py" }, { "canonical": true, @@ -26703,28 +26701,24 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.remove_context_children", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.purge_executions", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.RemoveContextChildren", + "fullName": "google.cloud.aiplatform.v1.MetadataService.PurgeExecutions", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "RemoveContextChildren" + "shortName": "PurgeExecutions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.RemoveContextChildrenRequest" + "type": "google.cloud.aiplatform_v1.types.PurgeExecutionsRequest" }, { - "name": "context", + "name": "parent", "type": "str" }, - { - "name": "child_contexts", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -26738,22 +26732,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.RemoveContextChildrenResponse", - "shortName": 
"remove_context_children" + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_executions" }, - "description": "Sample for RemoveContextChildren", - "file": "aiplatform_v1_generated_metadata_service_remove_context_children_sync.py", + "description": "Sample for PurgeExecutions", + "file": "aiplatform_v1_generated_metadata_service_purge_executions_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_RemoveContextChildren_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_PurgeExecutions_sync", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -26763,22 +26757,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_remove_context_children_sync.py" + "title": "aiplatform_v1_generated_metadata_service_purge_executions_sync.py" }, { "canonical": true, @@ -26788,27 +26782,23 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.update_artifact", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.query_artifact_lineage_subgraph", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateArtifact", + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "UpdateArtifact" + "shortName": "QueryArtifactLineageSubgraph" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.UpdateArtifactRequest" + "type": "google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest" }, { "name": "artifact", - "type": "google.cloud.aiplatform_v1.types.Artifact" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "type": "str" }, { "name": "retry", @@ -26823,22 +26813,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Artifact", - "shortName": "update_artifact" + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_artifact_lineage_subgraph" }, - "description": "Sample for UpdateArtifact", - "file": "aiplatform_v1_generated_metadata_service_update_artifact_async.py", + "description": "Sample for QueryArtifactLineageSubgraph", + "file": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_UpdateArtifact_async", + "regionTag": "aiplatform_v1_generated_MetadataService_QueryArtifactLineageSubgraph_async", "segments": [ { - "end": 50, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 51, "start": 27, "type": "SHORT" }, @@ -26848,22 +26838,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_update_artifact_async.py" + "title": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_async.py" }, { "canonical": true, @@ -26872,27 +26862,23 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.update_artifact", + "fullName": 
"google.cloud.aiplatform_v1.MetadataServiceClient.query_artifact_lineage_subgraph", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateArtifact", + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryArtifactLineageSubgraph", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "UpdateArtifact" + "shortName": "QueryArtifactLineageSubgraph" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateArtifactRequest" + "type": "google.cloud.aiplatform_v1.types.QueryArtifactLineageSubgraphRequest" }, { "name": "artifact", - "type": "google.cloud.aiplatform_v1.types.Artifact" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "type": "str" }, { "name": "retry", @@ -26907,22 +26893,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Artifact", - "shortName": "update_artifact" + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_artifact_lineage_subgraph" }, - "description": "Sample for UpdateArtifact", - "file": "aiplatform_v1_generated_metadata_service_update_artifact_sync.py", + "description": "Sample for QueryArtifactLineageSubgraph", + "file": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_UpdateArtifact_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_QueryArtifactLineageSubgraph_sync", "segments": [ { - "end": 50, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 51, "start": 27, "type": "SHORT" }, @@ -26932,22 +26918,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 
48, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_update_artifact_sync.py" + "title": "aiplatform_v1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py" }, { "canonical": true, @@ -26957,27 +26943,23 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.update_context", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.query_context_lineage_subgraph", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateContext", + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "UpdateContext" + "shortName": "QueryContextLineageSubgraph" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateContextRequest" + "type": "google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest" }, { "name": "context", - "type": "google.cloud.aiplatform_v1.types.Context" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "type": "str" }, { "name": "retry", @@ -26992,22 +26974,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Context", - "shortName": "update_context" + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_context_lineage_subgraph" }, - "description": "Sample for UpdateContext", - "file": "aiplatform_v1_generated_metadata_service_update_context_async.py", + "description": "Sample for QueryContextLineageSubgraph", + "file": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_MetadataService_UpdateContext_async", + "regionTag": "aiplatform_v1_generated_MetadataService_QueryContextLineageSubgraph_async", "segments": [ { - "end": 50, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 51, "start": 27, "type": "SHORT" }, @@ -27017,22 +26999,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_update_context_async.py" + "title": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_async.py" }, { "canonical": true, @@ -27041,27 +27023,23 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.update_context", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.query_context_lineage_subgraph", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateContext", + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryContextLineageSubgraph", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "UpdateContext" + "shortName": "QueryContextLineageSubgraph" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateContextRequest" + "type": "google.cloud.aiplatform_v1.types.QueryContextLineageSubgraphRequest" }, { "name": "context", - "type": "google.cloud.aiplatform_v1.types.Context" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "type": "str" }, { "name": "retry", @@ -27076,22 +27054,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Context", - "shortName": 
"update_context" + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_context_lineage_subgraph" }, - "description": "Sample for UpdateContext", - "file": "aiplatform_v1_generated_metadata_service_update_context_sync.py", + "description": "Sample for QueryContextLineageSubgraph", + "file": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_UpdateContext_sync", + "regionTag": "aiplatform_v1_generated_MetadataService_QueryContextLineageSubgraph_sync", "segments": [ { - "end": 50, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 51, "start": 27, "type": "SHORT" }, @@ -27101,22 +27079,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_update_context_sync.py" + "title": "aiplatform_v1_generated_metadata_service_query_context_lineage_subgraph_sync.py" }, { "canonical": true, @@ -27126,27 +27104,23 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.update_execution", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.query_execution_inputs_and_outputs", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateExecution", + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "UpdateExecution" + "shortName": "QueryExecutionInputsAndOutputs" }, "parameters": [ { 
"name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateExecutionRequest" + "type": "google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest" }, { "name": "execution", - "type": "google.cloud.aiplatform_v1.types.Execution" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "type": "str" }, { "name": "retry", @@ -27161,22 +27135,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Execution", - "shortName": "update_execution" + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_execution_inputs_and_outputs" }, - "description": "Sample for UpdateExecution", - "file": "aiplatform_v1_generated_metadata_service_update_execution_async.py", + "description": "Sample for QueryExecutionInputsAndOutputs", + "file": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_UpdateExecution_async", + "regionTag": "aiplatform_v1_generated_MetadataService_QueryExecutionInputsAndOutputs_async", "segments": [ { - "end": 50, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 51, "start": 27, "type": "SHORT" }, @@ -27186,22 +27160,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_update_execution_async.py" + "title": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_async.py" }, { "canonical": true, @@ -27210,27 +27184,3323 @@ "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1.MetadataServiceClient.update_execution", + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.query_execution_inputs_and_outputs", "method": { - "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateExecution", + "fullName": "google.cloud.aiplatform.v1.MetadataService.QueryExecutionInputsAndOutputs", "service": { "fullName": "google.cloud.aiplatform.v1.MetadataService", "shortName": "MetadataService" }, - "shortName": "UpdateExecution" + "shortName": "QueryExecutionInputsAndOutputs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateExecutionRequest" + "type": "google.cloud.aiplatform_v1.types.QueryExecutionInputsAndOutputsRequest" }, { "name": "execution", - "type": "google.cloud.aiplatform_v1.types.Execution" + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.LineageSubgraph", + "shortName": "query_execution_inputs_and_outputs" + }, + "description": "Sample for QueryExecutionInputsAndOutputs", + "file": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_QueryExecutionInputsAndOutputs_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py" + }, + { + "canonical": true, + "clientMethod": 
{ + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.remove_context_children", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.RemoveContextChildren", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "RemoveContextChildren" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.RemoveContextChildrenRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "child_contexts", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.RemoveContextChildrenResponse", + "shortName": "remove_context_children" + }, + "description": "Sample for RemoveContextChildren", + "file": "aiplatform_v1_generated_metadata_service_remove_context_children_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_RemoveContextChildren_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_remove_context_children_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + 
}, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.remove_context_children", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.RemoveContextChildren", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "RemoveContextChildren" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.RemoveContextChildrenRequest" + }, + { + "name": "context", + "type": "str" + }, + { + "name": "child_contexts", + "type": "MutableSequence[str]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.RemoveContextChildrenResponse", + "shortName": "remove_context_children" + }, + "description": "Sample for RemoveContextChildren", + "file": "aiplatform_v1_generated_metadata_service_remove_context_children_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_RemoveContextChildren_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_remove_context_children_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.update_artifact", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.MetadataService.UpdateArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateArtifactRequest" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1.types.Artifact" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "update_artifact" + }, + "description": "Sample for UpdateArtifact", + "file": "aiplatform_v1_generated_metadata_service_update_artifact_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_UpdateArtifact_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_update_artifact_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.update_artifact", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateArtifact", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + 
"shortName": "UpdateArtifact" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateArtifactRequest" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1.types.Artifact" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Artifact", + "shortName": "update_artifact" + }, + "description": "Sample for UpdateArtifact", + "file": "aiplatform_v1_generated_metadata_service_update_artifact_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_UpdateArtifact_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_update_artifact_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.update_context", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateContext", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateContextRequest" + }, + { + 
"name": "context", + "type": "google.cloud.aiplatform_v1.types.Context" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "update_context" + }, + "description": "Sample for UpdateContext", + "file": "aiplatform_v1_generated_metadata_service_update_context_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_UpdateContext_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_update_context_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.update_context", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateContext", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateContext" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateContextRequest" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1.types.Context" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Context", + "shortName": "update_context" + }, + "description": "Sample for UpdateContext", + "file": "aiplatform_v1_generated_metadata_service_update_context_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_UpdateContext_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_update_context_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceAsyncClient.update_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateExecutionRequest" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1.types.Execution" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "update_execution" + }, + "description": "Sample for UpdateExecution", + "file": "aiplatform_v1_generated_metadata_service_update_execution_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_UpdateExecution_async", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_update_execution_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient", + "shortName": "MetadataServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MetadataServiceClient.update_execution", + "method": { + "fullName": "google.cloud.aiplatform.v1.MetadataService.UpdateExecution", + "service": { + "fullName": "google.cloud.aiplatform.v1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "UpdateExecution" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.UpdateExecutionRequest" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1.types.Execution" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Execution", + "shortName": "update_execution" + }, + "description": "Sample for UpdateExecution", + "file": 
"aiplatform_v1_generated_metadata_service_update_execution_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MetadataService_UpdateExecution_sync", + "segments": [ + { + "end": 50, + "start": 27, + "type": "FULL" + }, + { + "end": 50, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 47, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 51, + "start": 48, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_metadata_service_update_execution_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient", + "shortName": "MigrationServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient.batch_migrate_resources", + "method": { + "fullName": "google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources", + "service": { + "fullName": "google.cloud.aiplatform.v1.MigrationService", + "shortName": "MigrationService" + }, + "shortName": "BatchMigrateResources" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "migrate_resource_requests", + "type": "MutableSequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_migrate_resources" + }, + "description": "Sample for BatchMigrateResources", + "file": "aiplatform_v1_generated_migration_service_batch_migrate_resources_async.py", + "language": 
"PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MigrationService_BatchMigrateResources_async", + "segments": [ + { + "end": 61, + "start": 27, + "type": "FULL" + }, + { + "end": 61, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 51, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 52, + "type": "REQUEST_EXECUTION" + }, + { + "end": 62, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_migration_service_batch_migrate_resources_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient", + "shortName": "MigrationServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient.batch_migrate_resources", + "method": { + "fullName": "google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources", + "service": { + "fullName": "google.cloud.aiplatform.v1.MigrationService", + "shortName": "MigrationService" + }, + "shortName": "BatchMigrateResources" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "migrate_resource_requests", + "type": "MutableSequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_migrate_resources" + }, + "description": "Sample for BatchMigrateResources", + "file": "aiplatform_v1_generated_migration_service_batch_migrate_resources_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": 
"aiplatform_v1_generated_MigrationService_BatchMigrateResources_sync", + "segments": [ + { + "end": 61, + "start": 27, + "type": "FULL" + }, + { + "end": 61, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 51, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 52, + "type": "REQUEST_EXECUTION" + }, + { + "end": 62, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_migration_service_batch_migrate_resources_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient", + "shortName": "MigrationServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient.search_migratable_resources", + "method": { + "fullName": "google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources", + "service": { + "fullName": "google.cloud.aiplatform.v1.MigrationService", + "shortName": "MigrationService" + }, + "shortName": "SearchMigratableResources" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager", + "shortName": "search_migratable_resources" + }, + "description": "Sample for SearchMigratableResources", + "file": "aiplatform_v1_generated_migration_service_search_migratable_resources_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MigrationService_SearchMigratableResources_async", + "segments": [ + { + "end": 52, + "start": 27, + 
"type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_migration_service_search_migratable_resources_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient", + "shortName": "MigrationServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient.search_migratable_resources", + "method": { + "fullName": "google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources", + "service": { + "fullName": "google.cloud.aiplatform.v1.MigrationService", + "shortName": "MigrationService" + }, + "shortName": "SearchMigratableResources" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesPager", + "shortName": "search_migratable_resources" + }, + "description": "Sample for SearchMigratableResources", + "file": "aiplatform_v1_generated_migration_service_search_migratable_resources_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_MigrationService_SearchMigratableResources_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + 
"end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_migration_service_search_migratable_resources_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelGardenServiceAsyncClient", + "shortName": "ModelGardenServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelGardenServiceAsyncClient.get_publisher_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelGardenService.GetPublisherModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelGardenService", + "shortName": "ModelGardenService" + }, + "shortName": "GetPublisherModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetPublisherModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PublisherModel", + "shortName": "get_publisher_model" + }, + "description": "Sample for GetPublisherModel", + "file": "aiplatform_v1_generated_model_garden_service_get_publisher_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelGardenService_GetPublisherModel_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1_generated_model_garden_service_get_publisher_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelGardenServiceClient", + "shortName": "ModelGardenServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelGardenServiceClient.get_publisher_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelGardenService.GetPublisherModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelGardenService", + "shortName": "ModelGardenService" + }, + "shortName": "GetPublisherModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetPublisherModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.PublisherModel", + "shortName": "get_publisher_model" + }, + "description": "Sample for GetPublisherModel", + "file": "aiplatform_v1_generated_model_garden_service_get_publisher_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelGardenService_GetPublisherModel_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_garden_service_get_publisher_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": 
"ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.batch_import_evaluated_annotations", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.BatchImportEvaluatedAnnotations", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "BatchImportEvaluatedAnnotations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchImportEvaluatedAnnotationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "evaluated_annotations", + "type": "MutableSequence[google.cloud.aiplatform_v1.types.EvaluatedAnnotation]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchImportEvaluatedAnnotationsResponse", + "shortName": "batch_import_evaluated_annotations" + }, + "description": "Sample for BatchImportEvaluatedAnnotations", + "file": "aiplatform_v1_generated_model_service_batch_import_evaluated_annotations_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_BatchImportEvaluatedAnnotations_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_batch_import_evaluated_annotations_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": 
"ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.batch_import_evaluated_annotations", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.BatchImportEvaluatedAnnotations", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "BatchImportEvaluatedAnnotations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchImportEvaluatedAnnotationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "evaluated_annotations", + "type": "MutableSequence[google.cloud.aiplatform_v1.types.EvaluatedAnnotation]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchImportEvaluatedAnnotationsResponse", + "shortName": "batch_import_evaluated_annotations" + }, + "description": "Sample for BatchImportEvaluatedAnnotations", + "file": "aiplatform_v1_generated_model_service_batch_import_evaluated_annotations_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_BatchImportEvaluatedAnnotations_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_batch_import_evaluated_annotations_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": 
"ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.batch_import_model_evaluation_slices", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.BatchImportModelEvaluationSlices", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "BatchImportModelEvaluationSlices" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchImportModelEvaluationSlicesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_evaluation_slices", + "type": "MutableSequence[google.cloud.aiplatform_v1.types.ModelEvaluationSlice]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchImportModelEvaluationSlicesResponse", + "shortName": "batch_import_model_evaluation_slices" + }, + "description": "Sample for BatchImportModelEvaluationSlices", + "file": "aiplatform_v1_generated_model_service_batch_import_model_evaluation_slices_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_BatchImportModelEvaluationSlices_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_batch_import_model_evaluation_slices_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + 
"shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.batch_import_model_evaluation_slices", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.BatchImportModelEvaluationSlices", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "BatchImportModelEvaluationSlices" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.BatchImportModelEvaluationSlicesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_evaluation_slices", + "type": "MutableSequence[google.cloud.aiplatform_v1.types.ModelEvaluationSlice]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.BatchImportModelEvaluationSlicesResponse", + "shortName": "batch_import_model_evaluation_slices" + }, + "description": "Sample for BatchImportModelEvaluationSlices", + "file": "aiplatform_v1_generated_model_service_batch_import_model_evaluation_slices_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_BatchImportModelEvaluationSlices_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_batch_import_model_evaluation_slices_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.copy_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.CopyModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "CopyModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CopyModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "source_model", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "copy_model" + }, + "description": "Sample for CopyModel", + "file": "aiplatform_v1_generated_model_service_copy_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_CopyModel_async", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_copy_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.copy_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.CopyModel", + "service": { + "fullName": 
"google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "CopyModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.CopyModelRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "source_model", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "copy_model" + }, + "description": "Sample for CopyModel", + "file": "aiplatform_v1_generated_model_service_copy_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_CopyModel_sync", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 47, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 54, + "start": 48, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 55, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_copy_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.delete_model_version", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModelVersion", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteModelVersion" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelVersionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + 
"name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model_version" + }, + "description": "Sample for DeleteModelVersion", + "file": "aiplatform_v1_generated_model_service_delete_model_version_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_DeleteModelVersion_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_delete_model_version_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.delete_model_version", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModelVersion", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteModelVersion" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelVersionRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model_version" + }, + "description": 
"Sample for DeleteModelVersion", + "file": "aiplatform_v1_generated_model_service_delete_model_version_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_DeleteModelVersion_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_delete_model_version_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.delete_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model" + }, + "description": "Sample for DeleteModel", + "file": "aiplatform_v1_generated_model_service_delete_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_DeleteModel_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + 
"end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_delete_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.delete_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "DeleteModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.DeleteModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model" + }, + "description": "Sample for DeleteModel", + "file": "aiplatform_v1_generated_model_service_delete_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_DeleteModel_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"aiplatform_v1_generated_model_service_delete_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.export_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ExportModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ExportModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_model" + }, + "description": "Sample for ExportModel", + "file": "aiplatform_v1_generated_model_service_export_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ExportModel_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_export_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": 
"ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.export_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ExportModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ExportModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ExportModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "output_config", + "type": "google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "export_model" + }, + "description": "Sample for ExportModel", + "file": "aiplatform_v1_generated_model_service_export_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ExportModel_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_export_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.get_model_evaluation_slice", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice", + "service": { + 
"fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluationSlice" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluationSlice", + "shortName": "get_model_evaluation_slice" + }, + "description": "Sample for GetModelEvaluationSlice", + "file": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.get_model_evaluation_slice", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluationSlice" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluationSlice", + "shortName": "get_model_evaluation_slice" + }, + "description": "Sample for GetModelEvaluationSlice", + "file": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.get_model_evaluation", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluation", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { 
+ "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", + "shortName": "get_model_evaluation" + }, + "description": "Sample for GetModelEvaluation", + "file": "aiplatform_v1_generated_model_service_get_model_evaluation_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluation_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_get_model_evaluation_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.get_model_evaluation", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluation", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", + "shortName": "get_model_evaluation" + }, + "description": "Sample for GetModelEvaluation", + "file": 
"aiplatform_v1_generated_model_service_get_model_evaluation_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluation_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_get_model_evaluation_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.get_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "aiplatform_v1_generated_model_service_get_model_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_GetModel_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 
38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_get_model_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.get_model", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.GetModel", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "GetModel" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.GetModelRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "get_model" + }, + "description": "Sample for GetModel", + "file": "aiplatform_v1_generated_model_service_get_model_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_GetModel_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_get_model_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { 
+ "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.import_model_evaluation", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ImportModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportModelEvaluationRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_evaluation", + "type": "google.cloud.aiplatform_v1.types.ModelEvaluation" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", + "shortName": "import_model_evaluation" + }, + "description": "Sample for ImportModelEvaluation", + "file": "aiplatform_v1_generated_model_service_import_model_evaluation_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ImportModelEvaluation_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_import_model_evaluation_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": 
"google.cloud.aiplatform_v1.ModelServiceClient.import_model_evaluation", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ImportModelEvaluation" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ImportModelEvaluationRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "model_evaluation", + "type": "google.cloud.aiplatform_v1.types.ModelEvaluation" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", + "shortName": "import_model_evaluation" + }, + "description": "Sample for ImportModelEvaluation", + "file": "aiplatform_v1_generated_model_service_import_model_evaluation_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ImportModelEvaluation_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_import_model_evaluation_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_model_evaluation_slices", + "method": { + "fullName": 
"google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluationSlices" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager", + "shortName": "list_model_evaluation_slices" + }, + "description": "Sample for ListModelEvaluationSlices", + "file": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluationSlices_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_model_evaluation_slices", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + 
"shortName": "ListModelEvaluationSlices" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesPager", + "shortName": "list_model_evaluation_slices" + }, + "description": "Sample for ListModelEvaluationSlices", + "file": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluationSlices_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_model_evaluations", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluations", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest" + }, + { + 
"name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsAsyncPager", + "shortName": "list_model_evaluations" + }, + "description": "Sample for ListModelEvaluations", + "file": "aiplatform_v1_generated_model_service_list_model_evaluations_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluations_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_model_evaluations_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_model_evaluations", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluations", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelEvaluations" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + 
"resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsPager", + "shortName": "list_model_evaluations" + }, + "description": "Sample for ListModelEvaluations", + "file": "aiplatform_v1_generated_model_service_list_model_evaluations_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluations_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_model_evaluations_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_model_versions", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelVersions", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelVersions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelVersionsRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelVersionsAsyncPager", + "shortName": "list_model_versions" + }, + "description": "Sample for ListModelVersions", + "file": 
"aiplatform_v1_generated_model_service_list_model_versions_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1_generated_ModelService_ListModelVersions_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1_generated_model_service_list_model_versions_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_model_versions", + "method": { + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelVersions", + "service": { + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" + }, + "shortName": "ListModelVersions" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1.types.ListModelVersionsRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -27245,22 +30515,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Execution", - "shortName": "update_execution" + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelVersionsPager", + "shortName": "list_model_versions" }, - "description": "Sample for UpdateExecution", - "file": "aiplatform_v1_generated_metadata_service_update_execution_sync.py", + "description": "Sample for ListModelVersions", + "file": "aiplatform_v1_generated_model_service_list_model_versions_sync.py", "language": 
"PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MetadataService_UpdateExecution_sync", + "regionTag": "aiplatform_v1_generated_ModelService_ListModelVersions_sync", "segments": [ { - "end": 50, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 52, "start": 27, "type": "SHORT" }, @@ -27270,53 +30540,49 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_metadata_service_update_execution_sync.py" + "title": "aiplatform_v1_generated_model_service_list_model_versions_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient", - "shortName": "MigrationServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient.batch_migrate_resources", + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_models", "method": { - "fullName": "google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources", + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModels", "service": { - "fullName": "google.cloud.aiplatform.v1.MigrationService", - "shortName": "MigrationService" + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" }, - "shortName": "BatchMigrateResources" + "shortName": "ListModels" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest" + "type": "google.cloud.aiplatform_v1.types.ListModelsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "migrate_resource_requests", - "type": 
"MutableSequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -27330,22 +30596,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "batch_migrate_resources" + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsAsyncPager", + "shortName": "list_models" }, - "description": "Sample for BatchMigrateResources", - "file": "aiplatform_v1_generated_migration_service_batch_migrate_resources_async.py", + "description": "Sample for ListModels", + "file": "aiplatform_v1_generated_model_service_list_models_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MigrationService_BatchMigrateResources_async", + "regionTag": "aiplatform_v1_generated_ModelService_ListModels_async", "segments": [ { - "end": 61, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 61, + "end": 52, "start": 27, "type": "SHORT" }, @@ -27355,52 +30621,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 52, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 62, - "start": 59, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_migration_service_batch_migrate_resources_async.py" + "title": "aiplatform_v1_generated_model_service_list_models_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient", - "shortName": "MigrationServiceClient" + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient.batch_migrate_resources", + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_models", "method": { - "fullName": 
"google.cloud.aiplatform.v1.MigrationService.BatchMigrateResources", + "fullName": "google.cloud.aiplatform.v1.ModelService.ListModels", "service": { - "fullName": "google.cloud.aiplatform.v1.MigrationService", - "shortName": "MigrationService" + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" }, - "shortName": "BatchMigrateResources" + "shortName": "ListModels" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.BatchMigrateResourcesRequest" + "type": "google.cloud.aiplatform_v1.types.ListModelsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "migrate_resource_requests", - "type": "MutableSequence[google.cloud.aiplatform_v1.types.MigrateResourceRequest]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -27414,22 +30676,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "batch_migrate_resources" + "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsPager", + "shortName": "list_models" }, - "description": "Sample for BatchMigrateResources", - "file": "aiplatform_v1_generated_migration_service_batch_migrate_resources_sync.py", + "description": "Sample for ListModels", + "file": "aiplatform_v1_generated_model_service_list_models_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MigrationService_BatchMigrateResources_sync", + "regionTag": "aiplatform_v1_generated_ModelService_ListModels_sync", "segments": [ { - "end": 61, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 61, + "end": 52, "start": 27, "type": "SHORT" }, @@ -27439,49 +30701,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 52, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 62, - "start": 59, + "end": 53, + "start": 49, "type": 
"RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_migration_service_batch_migrate_resources_sync.py" + "title": "aiplatform_v1_generated_model_service_list_models_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient", - "shortName": "MigrationServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.MigrationServiceAsyncClient.search_migratable_resources", + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.merge_version_aliases", "method": { - "fullName": "google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources", + "fullName": "google.cloud.aiplatform.v1.ModelService.MergeVersionAliases", "service": { - "fullName": "google.cloud.aiplatform.v1.MigrationService", - "shortName": "MigrationService" + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" }, - "shortName": "SearchMigratableResources" + "shortName": "MergeVersionAliases" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest" + "type": "google.cloud.aiplatform_v1.types.MergeVersionAliasesRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, + { + "name": "version_aliases", + "type": "MutableSequence[str]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -27495,14 +30761,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager", - "shortName": "search_migratable_resources" + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "merge_version_aliases" }, - "description": "Sample for SearchMigratableResources", - "file": "aiplatform_v1_generated_migration_service_search_migratable_resources_async.py", + "description": 
"Sample for MergeVersionAliases", + "file": "aiplatform_v1_generated_model_service_merge_version_aliases_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MigrationService_SearchMigratableResources_async", + "regionTag": "aiplatform_v1_generated_ModelService_MergeVersionAliases_async", "segments": [ { "end": 52, @@ -27520,48 +30786,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { "end": 53, - "start": 49, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_migration_service_search_migratable_resources_async.py" + "title": "aiplatform_v1_generated_model_service_merge_version_aliases_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient", - "shortName": "MigrationServiceClient" + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.MigrationServiceClient.search_migratable_resources", + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.merge_version_aliases", "method": { - "fullName": "google.cloud.aiplatform.v1.MigrationService.SearchMigratableResources", + "fullName": "google.cloud.aiplatform.v1.ModelService.MergeVersionAliases", "service": { - "fullName": "google.cloud.aiplatform.v1.MigrationService", - "shortName": "MigrationService" + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" }, - "shortName": "SearchMigratableResources" + "shortName": "MergeVersionAliases" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.SearchMigratableResourcesRequest" + "type": "google.cloud.aiplatform_v1.types.MergeVersionAliasesRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, + { + 
"name": "version_aliases", + "type": "MutableSequence[str]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -27575,14 +30845,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.migration_service.pagers.SearchMigratableResourcesPager", - "shortName": "search_migratable_resources" + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "merge_version_aliases" }, - "description": "Sample for SearchMigratableResources", - "file": "aiplatform_v1_generated_migration_service_search_migratable_resources_sync.py", + "description": "Sample for MergeVersionAliases", + "file": "aiplatform_v1_generated_model_service_merge_version_aliases_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_MigrationService_SearchMigratableResources_sync", + "regionTag": "aiplatform_v1_generated_ModelService_MergeVersionAliases_sync", "segments": [ { "end": 52, @@ -27600,47 +30870,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { "end": 53, - "start": 49, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_migration_service_search_migratable_resources_sync.py" + "title": "aiplatform_v1_generated_model_service_merge_version_aliases_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelGardenServiceAsyncClient", - "shortName": "ModelGardenServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelGardenServiceAsyncClient.get_publisher_model", + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.update_explanation_dataset", "method": { - "fullName": 
"google.cloud.aiplatform.v1.ModelGardenService.GetPublisherModel", + "fullName": "google.cloud.aiplatform.v1.ModelService.UpdateExplanationDataset", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelGardenService", - "shortName": "ModelGardenService" + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" }, - "shortName": "GetPublisherModel" + "shortName": "UpdateExplanationDataset" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetPublisherModelRequest" + "type": "google.cloud.aiplatform_v1.types.UpdateExplanationDatasetRequest" }, { - "name": "name", + "name": "model", "type": "str" }, { @@ -27656,22 +30926,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.PublisherModel", - "shortName": "get_publisher_model" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_explanation_dataset" }, - "description": "Sample for GetPublisherModel", - "file": "aiplatform_v1_generated_model_garden_service_get_publisher_model_async.py", + "description": "Sample for UpdateExplanationDataset", + "file": "aiplatform_v1_generated_model_service_update_explanation_dataset_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelGardenService_GetPublisherModel_async", + "regionTag": "aiplatform_v1_generated_ModelService_UpdateExplanationDataset_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -27686,41 +30956,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_garden_service_get_publisher_model_async.py" + "title": "aiplatform_v1_generated_model_service_update_explanation_dataset_async.py" }, { 
"canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelGardenServiceClient", - "shortName": "ModelGardenServiceClient" + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelGardenServiceClient.get_publisher_model", + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.update_explanation_dataset", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelGardenService.GetPublisherModel", + "fullName": "google.cloud.aiplatform.v1.ModelService.UpdateExplanationDataset", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelGardenService", - "shortName": "ModelGardenService" + "fullName": "google.cloud.aiplatform.v1.ModelService", + "shortName": "ModelService" }, - "shortName": "GetPublisherModel" + "shortName": "UpdateExplanationDataset" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetPublisherModelRequest" + "type": "google.cloud.aiplatform_v1.types.UpdateExplanationDatasetRequest" }, { - "name": "name", + "name": "model", "type": "str" }, { @@ -27736,22 +31006,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.PublisherModel", - "shortName": "get_publisher_model" + "resultType": "google.api_core.operation.Operation", + "shortName": "update_explanation_dataset" }, - "description": "Sample for GetPublisherModel", - "file": "aiplatform_v1_generated_model_garden_service_get_publisher_model_sync.py", + "description": "Sample for UpdateExplanationDataset", + "file": "aiplatform_v1_generated_model_service_update_explanation_dataset_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelGardenService_GetPublisherModel_sync", + "regionTag": "aiplatform_v1_generated_ModelService_UpdateExplanationDataset_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, 
+ "end": 55, "start": 27, "type": "SHORT" }, @@ -27766,17 +31036,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_garden_service_get_publisher_model_sync.py" + "title": "aiplatform_v1_generated_model_service_update_explanation_dataset_sync.py" }, { "canonical": true, @@ -27786,27 +31056,27 @@ "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.batch_import_evaluated_annotations", + "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.update_model", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.BatchImportEvaluatedAnnotations", + "fullName": "google.cloud.aiplatform.v1.ModelService.UpdateModel", "service": { "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, - "shortName": "BatchImportEvaluatedAnnotations" + "shortName": "UpdateModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.BatchImportEvaluatedAnnotationsRequest" + "type": "google.cloud.aiplatform_v1.types.UpdateModelRequest" }, { - "name": "parent", - "type": "str" + "name": "model", + "type": "google.cloud.aiplatform_v1.types.Model" }, { - "name": "evaluated_annotations", - "type": "MutableSequence[google.cloud.aiplatform_v1.types.EvaluatedAnnotation]" + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -27821,22 +31091,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.BatchImportEvaluatedAnnotationsResponse", - "shortName": "batch_import_evaluated_annotations" + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "update_model" }, - "description": "Sample for 
BatchImportEvaluatedAnnotations", - "file": "aiplatform_v1_generated_model_service_batch_import_evaluated_annotations_async.py", + "description": "Sample for UpdateModel", + "file": "aiplatform_v1_generated_model_service_update_model_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_BatchImportEvaluatedAnnotations_async", + "regionTag": "aiplatform_v1_generated_ModelService_UpdateModel_async", "segments": [ { - "end": 51, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 54, "start": 27, "type": "SHORT" }, @@ -27846,22 +31116,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 51, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_batch_import_evaluated_annotations_async.py" + "title": "aiplatform_v1_generated_model_service_update_model_async.py" }, { "canonical": true, @@ -27870,27 +31140,27 @@ "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.batch_import_evaluated_annotations", + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.update_model", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.BatchImportEvaluatedAnnotations", + "fullName": "google.cloud.aiplatform.v1.ModelService.UpdateModel", "service": { "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, - "shortName": "BatchImportEvaluatedAnnotations" + "shortName": "UpdateModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.BatchImportEvaluatedAnnotationsRequest" + "type": "google.cloud.aiplatform_v1.types.UpdateModelRequest" }, { - "name": "parent", - "type": "str" + "name": "model", + 
"type": "google.cloud.aiplatform_v1.types.Model" }, { - "name": "evaluated_annotations", - "type": "MutableSequence[google.cloud.aiplatform_v1.types.EvaluatedAnnotation]" + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -27905,22 +31175,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.BatchImportEvaluatedAnnotationsResponse", - "shortName": "batch_import_evaluated_annotations" + "resultType": "google.cloud.aiplatform_v1.types.Model", + "shortName": "update_model" }, - "description": "Sample for BatchImportEvaluatedAnnotations", - "file": "aiplatform_v1_generated_model_service_batch_import_evaluated_annotations_sync.py", + "description": "Sample for UpdateModel", + "file": "aiplatform_v1_generated_model_service_update_model_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_BatchImportEvaluatedAnnotations_sync", + "regionTag": "aiplatform_v1_generated_ModelService_UpdateModel_sync", "segments": [ { - "end": 51, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 54, "start": 27, "type": "SHORT" }, @@ -27930,22 +31200,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 51, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_batch_import_evaluated_annotations_sync.py" + "title": "aiplatform_v1_generated_model_service_update_model_sync.py" }, { "canonical": true, @@ -27955,27 +31225,27 @@ "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.batch_import_model_evaluation_slices", + "fullName": 
"google.cloud.aiplatform_v1.ModelServiceAsyncClient.upload_model", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.BatchImportModelEvaluationSlices", + "fullName": "google.cloud.aiplatform.v1.ModelService.UploadModel", "service": { "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, - "shortName": "BatchImportModelEvaluationSlices" + "shortName": "UploadModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.BatchImportModelEvaluationSlicesRequest" + "type": "google.cloud.aiplatform_v1.types.UploadModelRequest" }, { "name": "parent", "type": "str" }, { - "name": "model_evaluation_slices", - "type": "MutableSequence[google.cloud.aiplatform_v1.types.ModelEvaluationSlice]" + "name": "model", + "type": "google.cloud.aiplatform_v1.types.Model" }, { "name": "retry", @@ -27990,22 +31260,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.BatchImportModelEvaluationSlicesResponse", - "shortName": "batch_import_model_evaluation_slices" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "upload_model" }, - "description": "Sample for BatchImportModelEvaluationSlices", - "file": "aiplatform_v1_generated_model_service_batch_import_model_evaluation_slices_async.py", + "description": "Sample for UploadModel", + "file": "aiplatform_v1_generated_model_service_upload_model_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_BatchImportModelEvaluationSlices_async", + "regionTag": "aiplatform_v1_generated_ModelService_UploadModel_async", "segments": [ { - "end": 51, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 59, "start": 27, "type": "SHORT" }, @@ -28015,22 +31285,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 56, + "start": 50, 
"type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_batch_import_model_evaluation_slices_async.py" + "title": "aiplatform_v1_generated_model_service_upload_model_async.py" }, { "canonical": true, @@ -28039,27 +31309,27 @@ "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.batch_import_model_evaluation_slices", + "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.upload_model", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.BatchImportModelEvaluationSlices", + "fullName": "google.cloud.aiplatform.v1.ModelService.UploadModel", "service": { "fullName": "google.cloud.aiplatform.v1.ModelService", "shortName": "ModelService" }, - "shortName": "BatchImportModelEvaluationSlices" + "shortName": "UploadModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.BatchImportModelEvaluationSlicesRequest" + "type": "google.cloud.aiplatform_v1.types.UploadModelRequest" }, { "name": "parent", "type": "str" }, { - "name": "model_evaluation_slices", - "type": "MutableSequence[google.cloud.aiplatform_v1.types.ModelEvaluationSlice]" + "name": "model", + "type": "google.cloud.aiplatform_v1.types.Model" }, { "name": "retry", @@ -28074,22 +31344,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.BatchImportModelEvaluationSlicesResponse", - "shortName": "batch_import_model_evaluation_slices" + "resultType": "google.api_core.operation.Operation", + "shortName": "upload_model" }, - "description": "Sample for BatchImportModelEvaluationSlices", - "file": "aiplatform_v1_generated_model_service_batch_import_model_evaluation_slices_sync.py", + "description": "Sample for UploadModel", + "file": "aiplatform_v1_generated_model_service_upload_model_sync.py", "language": 
"PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_BatchImportModelEvaluationSlices_sync", + "regionTag": "aiplatform_v1_generated_ModelService_UploadModel_sync", "segments": [ { - "end": 51, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 59, "start": 27, "type": "SHORT" }, @@ -28099,51 +31369,59 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_batch_import_model_evaluation_slices_sync.py" + "title": "aiplatform_v1_generated_model_service_upload_model_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.copy_model", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.assign_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.CopyModel", + "fullName": "google.cloud.aiplatform.v1.NotebookService.AssignNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "CopyModel" + "shortName": "AssignNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CopyModelRequest" + "type": "google.cloud.aiplatform_v1.types.AssignNotebookRuntimeRequest" }, { "name": "parent", "type": "str" }, { - "name": "source_model", + "name": "notebook_runtime_template", + 
"type": "str" + }, + { + "name": "notebook_runtime", + "type": "google.cloud.aiplatform_v1.types.NotebookRuntime" + }, + { + "name": "notebook_runtime_id", "type": "str" }, { @@ -28160,21 +31438,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "copy_model" + "shortName": "assign_notebook_runtime" }, - "description": "Sample for CopyModel", - "file": "aiplatform_v1_generated_model_service_copy_model_async.py", + "description": "Sample for AssignNotebookRuntime", + "file": "aiplatform_v1_generated_notebook_service_assign_notebook_runtime_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_CopyModel_async", + "regionTag": "aiplatform_v1_generated_NotebookService_AssignNotebookRuntime_async", "segments": [ { - "end": 57, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 61, "start": 27, "type": "SHORT" }, @@ -28184,50 +31462,58 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 47, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 48, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_copy_model_async.py" + "title": "aiplatform_v1_generated_notebook_service_assign_notebook_runtime_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.copy_model", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.assign_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.CopyModel", + "fullName": "google.cloud.aiplatform.v1.NotebookService.AssignNotebookRuntime", 
"service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "CopyModel" + "shortName": "AssignNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.CopyModelRequest" + "type": "google.cloud.aiplatform_v1.types.AssignNotebookRuntimeRequest" }, { "name": "parent", "type": "str" }, { - "name": "source_model", + "name": "notebook_runtime_template", + "type": "str" + }, + { + "name": "notebook_runtime", + "type": "google.cloud.aiplatform_v1.types.NotebookRuntime" + }, + { + "name": "notebook_runtime_id", "type": "str" }, { @@ -28244,21 +31530,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "copy_model" + "shortName": "assign_notebook_runtime" }, - "description": "Sample for CopyModel", - "file": "aiplatform_v1_generated_model_service_copy_model_sync.py", + "description": "Sample for AssignNotebookRuntime", + "file": "aiplatform_v1_generated_notebook_service_assign_notebook_runtime_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_CopyModel_sync", + "regionTag": "aiplatform_v1_generated_NotebookService_AssignNotebookRuntime_sync", "segments": [ { - "end": 57, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 61, "start": 27, "type": "SHORT" }, @@ -28268,47 +31554,55 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 47, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 48, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_copy_model_sync.py" + "title": "aiplatform_v1_generated_notebook_service_assign_notebook_runtime_sync.py" }, { "canonical": true, "clientMethod": { "async": true, 
"client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.delete_model_version", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.create_notebook_runtime_template", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModelVersion", + "fullName": "google.cloud.aiplatform.v1.NotebookService.CreateNotebookRuntimeTemplate", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "DeleteModelVersion" + "shortName": "CreateNotebookRuntimeTemplate" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteModelVersionRequest" + "type": "google.cloud.aiplatform_v1.types.CreateNotebookRuntimeTemplateRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "notebook_runtime_template", + "type": "google.cloud.aiplatform_v1.types.NotebookRuntimeTemplate" + }, + { + "name": "notebook_runtime_template_id", "type": "str" }, { @@ -28325,21 +31619,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_model_version" + "shortName": "create_notebook_runtime_template" }, - "description": "Sample for DeleteModelVersion", - "file": "aiplatform_v1_generated_model_service_delete_model_version_async.py", + "description": "Sample for CreateNotebookRuntimeTemplate", + "file": "aiplatform_v1_generated_notebook_service_create_notebook_runtime_template_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_DeleteModelVersion_async", + "regionTag": 
"aiplatform_v1_generated_NotebookService_CreateNotebookRuntimeTemplate_async", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -28349,46 +31643,54 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_delete_model_version_async.py" + "title": "aiplatform_v1_generated_notebook_service_create_notebook_runtime_template_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.delete_model_version", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.create_notebook_runtime_template", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModelVersion", + "fullName": "google.cloud.aiplatform.v1.NotebookService.CreateNotebookRuntimeTemplate", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "DeleteModelVersion" + "shortName": "CreateNotebookRuntimeTemplate" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteModelVersionRequest" + "type": "google.cloud.aiplatform_v1.types.CreateNotebookRuntimeTemplateRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "notebook_runtime_template", + "type": "google.cloud.aiplatform_v1.types.NotebookRuntimeTemplate" + }, + { + "name": 
"notebook_runtime_template_id", "type": "str" }, { @@ -28405,21 +31707,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_model_version" + "shortName": "create_notebook_runtime_template" }, - "description": "Sample for DeleteModelVersion", - "file": "aiplatform_v1_generated_model_service_delete_model_version_sync.py", + "description": "Sample for CreateNotebookRuntimeTemplate", + "file": "aiplatform_v1_generated_notebook_service_create_notebook_runtime_template_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_DeleteModelVersion_sync", + "regionTag": "aiplatform_v1_generated_NotebookService_CreateNotebookRuntimeTemplate_sync", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -28429,44 +31731,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_delete_model_version_sync.py" + "title": "aiplatform_v1_generated_notebook_service_create_notebook_runtime_template_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.delete_model", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.delete_notebook_runtime_template", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModel", + "fullName": 
"google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntimeTemplate", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "DeleteModel" + "shortName": "DeleteNotebookRuntimeTemplate" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteModelRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteNotebookRuntimeTemplateRequest" }, { "name": "name", @@ -28486,13 +31788,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_model" + "shortName": "delete_notebook_runtime_template" }, - "description": "Sample for DeleteModel", - "file": "aiplatform_v1_generated_model_service_delete_model_async.py", + "description": "Sample for DeleteNotebookRuntimeTemplate", + "file": "aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_DeleteModel_async", + "regionTag": "aiplatform_v1_generated_NotebookService_DeleteNotebookRuntimeTemplate_async", "segments": [ { "end": 55, @@ -28525,28 +31827,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_delete_model_async.py" + "title": "aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.delete_model", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.delete_notebook_runtime_template", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.DeleteModel", + 
"fullName": "google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntimeTemplate", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "DeleteModel" + "shortName": "DeleteNotebookRuntimeTemplate" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.DeleteModelRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteNotebookRuntimeTemplateRequest" }, { "name": "name", @@ -28566,13 +31868,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_model" + "shortName": "delete_notebook_runtime_template" }, - "description": "Sample for DeleteModel", - "file": "aiplatform_v1_generated_model_service_delete_model_sync.py", + "description": "Sample for DeleteNotebookRuntimeTemplate", + "file": "aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_DeleteModel_sync", + "regionTag": "aiplatform_v1_generated_NotebookService_DeleteNotebookRuntimeTemplate_sync", "segments": [ { "end": 55, @@ -28605,38 +31907,34 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_delete_model_sync.py" + "title": "aiplatform_v1_generated_notebook_service_delete_notebook_runtime_template_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.export_model", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.delete_notebook_runtime", "method": { - "fullName": 
"google.cloud.aiplatform.v1.ModelService.ExportModel", + "fullName": "google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "ExportModel" + "shortName": "DeleteNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ExportModelRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteNotebookRuntimeRequest" }, { "name": "name", "type": "str" }, - { - "name": "output_config", - "type": "google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -28651,13 +31949,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "export_model" + "shortName": "delete_notebook_runtime" }, - "description": "Sample for ExportModel", - "file": "aiplatform_v1_generated_model_service_export_model_async.py", + "description": "Sample for DeleteNotebookRuntime", + "file": "aiplatform_v1_generated_notebook_service_delete_notebook_runtime_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_ExportModel_async", + "regionTag": "aiplatform_v1_generated_NotebookService_DeleteNotebookRuntime_async", "segments": [ { "end": 55, @@ -28690,37 +31988,33 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_export_model_async.py" + "title": "aiplatform_v1_generated_notebook_service_delete_notebook_runtime_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1.ModelServiceClient.export_model", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.delete_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.ExportModel", + "fullName": "google.cloud.aiplatform.v1.NotebookService.DeleteNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "ExportModel" + "shortName": "DeleteNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ExportModelRequest" + "type": "google.cloud.aiplatform_v1.types.DeleteNotebookRuntimeRequest" }, { "name": "name", "type": "str" }, - { - "name": "output_config", - "type": "google.cloud.aiplatform_v1.types.ExportModelRequest.OutputConfig" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -28735,13 +32029,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "export_model" + "shortName": "delete_notebook_runtime" }, - "description": "Sample for ExportModel", - "file": "aiplatform_v1_generated_model_service_export_model_sync.py", + "description": "Sample for DeleteNotebookRuntime", + "file": "aiplatform_v1_generated_notebook_service_delete_notebook_runtime_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_ExportModel_sync", + "regionTag": "aiplatform_v1_generated_NotebookService_DeleteNotebookRuntime_sync", "segments": [ { "end": 55, @@ -28774,29 +32068,29 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_export_model_sync.py" + "title": "aiplatform_v1_generated_notebook_service_delete_notebook_runtime_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": 
"ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.get_model_evaluation_slice", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.get_notebook_runtime_template", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice", + "fullName": "google.cloud.aiplatform.v1.NotebookService.GetNotebookRuntimeTemplate", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "GetModelEvaluationSlice" + "shortName": "GetNotebookRuntimeTemplate" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest" + "type": "google.cloud.aiplatform_v1.types.GetNotebookRuntimeTemplateRequest" }, { "name": "name", @@ -28815,14 +32109,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluationSlice", - "shortName": "get_model_evaluation_slice" + "resultType": "google.cloud.aiplatform_v1.types.NotebookRuntimeTemplate", + "shortName": "get_notebook_runtime_template" }, - "description": "Sample for GetModelEvaluationSlice", - "file": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py", + "description": "Sample for GetNotebookRuntimeTemplate", + "file": "aiplatform_v1_generated_notebook_service_get_notebook_runtime_template_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_async", + "regionTag": "aiplatform_v1_generated_NotebookService_GetNotebookRuntimeTemplate_async", "segments": [ { "end": 51, @@ -28855,28 +32149,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1_generated_model_service_get_model_evaluation_slice_async.py" + "title": "aiplatform_v1_generated_notebook_service_get_notebook_runtime_template_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.get_model_evaluation_slice", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.get_notebook_runtime_template", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluationSlice", + "fullName": "google.cloud.aiplatform.v1.NotebookService.GetNotebookRuntimeTemplate", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "GetModelEvaluationSlice" + "shortName": "GetNotebookRuntimeTemplate" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationSliceRequest" + "type": "google.cloud.aiplatform_v1.types.GetNotebookRuntimeTemplateRequest" }, { "name": "name", @@ -28895,14 +32189,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluationSlice", - "shortName": "get_model_evaluation_slice" + "resultType": "google.cloud.aiplatform_v1.types.NotebookRuntimeTemplate", + "shortName": "get_notebook_runtime_template" }, - "description": "Sample for GetModelEvaluationSlice", - "file": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_sync.py", + "description": "Sample for GetNotebookRuntimeTemplate", + "file": "aiplatform_v1_generated_notebook_service_get_notebook_runtime_template_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_ModelService_GetModelEvaluationSlice_sync", + "regionTag": "aiplatform_v1_generated_NotebookService_GetNotebookRuntimeTemplate_sync", "segments": [ { "end": 51, @@ -28935,29 +32229,29 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_get_model_evaluation_slice_sync.py" + "title": "aiplatform_v1_generated_notebook_service_get_notebook_runtime_template_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.get_model_evaluation", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.get_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluation", + "fullName": "google.cloud.aiplatform.v1.NotebookService.GetNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "GetModelEvaluation" + "shortName": "GetNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationRequest" + "type": "google.cloud.aiplatform_v1.types.GetNotebookRuntimeRequest" }, { "name": "name", @@ -28976,14 +32270,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", - "shortName": "get_model_evaluation" + "resultType": "google.cloud.aiplatform_v1.types.NotebookRuntime", + "shortName": "get_notebook_runtime" }, - "description": "Sample for GetModelEvaluation", - "file": "aiplatform_v1_generated_model_service_get_model_evaluation_async.py", + "description": "Sample for GetNotebookRuntime", 
+ "file": "aiplatform_v1_generated_notebook_service_get_notebook_runtime_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluation_async", + "regionTag": "aiplatform_v1_generated_NotebookService_GetNotebookRuntime_async", "segments": [ { "end": 51, @@ -29016,28 +32310,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_get_model_evaluation_async.py" + "title": "aiplatform_v1_generated_notebook_service_get_notebook_runtime_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.get_model_evaluation", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.get_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.GetModelEvaluation", + "fullName": "google.cloud.aiplatform.v1.NotebookService.GetNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "GetModelEvaluation" + "shortName": "GetNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetModelEvaluationRequest" + "type": "google.cloud.aiplatform_v1.types.GetNotebookRuntimeRequest" }, { "name": "name", @@ -29056,14 +32350,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", - "shortName": "get_model_evaluation" + "resultType": "google.cloud.aiplatform_v1.types.NotebookRuntime", + "shortName": "get_notebook_runtime" }, - "description": "Sample for GetModelEvaluation", - "file": 
"aiplatform_v1_generated_model_service_get_model_evaluation_sync.py", + "description": "Sample for GetNotebookRuntime", + "file": "aiplatform_v1_generated_notebook_service_get_notebook_runtime_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_GetModelEvaluation_sync", + "regionTag": "aiplatform_v1_generated_NotebookService_GetNotebookRuntime_sync", "segments": [ { "end": 51, @@ -29096,32 +32390,32 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_get_model_evaluation_sync.py" + "title": "aiplatform_v1_generated_notebook_service_get_notebook_runtime_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.get_model", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.list_notebook_runtime_templates", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.GetModel", + "fullName": "google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimeTemplates", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "GetModel" + "shortName": "ListNotebookRuntimeTemplates" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.GetModelRequest" + "type": "google.cloud.aiplatform_v1.types.ListNotebookRuntimeTemplatesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -29137,22 +32431,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Model", - "shortName": "get_model" + "resultType": 
"google.cloud.aiplatform_v1.services.notebook_service.pagers.ListNotebookRuntimeTemplatesAsyncPager", + "shortName": "list_notebook_runtime_templates" }, - "description": "Sample for GetModel", - "file": "aiplatform_v1_generated_model_service_get_model_async.py", + "description": "Sample for ListNotebookRuntimeTemplates", + "file": "aiplatform_v1_generated_notebook_service_list_notebook_runtime_templates_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_GetModel_async", + "regionTag": "aiplatform_v1_generated_NotebookService_ListNotebookRuntimeTemplates_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -29172,36 +32466,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_get_model_async.py" + "title": "aiplatform_v1_generated_notebook_service_list_notebook_runtime_templates_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.get_model", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.list_notebook_runtime_templates", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.GetModel", + "fullName": "google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimeTemplates", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "GetModel" + "shortName": "ListNotebookRuntimeTemplates" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.GetModelRequest" + "type": "google.cloud.aiplatform_v1.types.ListNotebookRuntimeTemplatesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -29217,22 +32511,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Model", - "shortName": "get_model" + "resultType": "google.cloud.aiplatform_v1.services.notebook_service.pagers.ListNotebookRuntimeTemplatesPager", + "shortName": "list_notebook_runtime_templates" }, - "description": "Sample for GetModel", - "file": "aiplatform_v1_generated_model_service_get_model_sync.py", + "description": "Sample for ListNotebookRuntimeTemplates", + "file": "aiplatform_v1_generated_notebook_service_list_notebook_runtime_templates_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_GetModel_sync", + "regionTag": "aiplatform_v1_generated_NotebookService_ListNotebookRuntimeTemplates_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -29252,43 +32546,39 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_get_model_sync.py" + "title": "aiplatform_v1_generated_notebook_service_list_notebook_runtime_templates_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.import_model_evaluation", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.list_notebook_runtimes", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation", + "fullName": 
"google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimes", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "ImportModelEvaluation" + "shortName": "ListNotebookRuntimes" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ImportModelEvaluationRequest" + "type": "google.cloud.aiplatform_v1.types.ListNotebookRuntimesRequest" }, { "name": "parent", "type": "str" }, - { - "name": "model_evaluation", - "type": "google.cloud.aiplatform_v1.types.ModelEvaluation" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -29302,22 +32592,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", - "shortName": "import_model_evaluation" + "resultType": "google.cloud.aiplatform_v1.services.notebook_service.pagers.ListNotebookRuntimesAsyncPager", + "shortName": "list_notebook_runtimes" }, - "description": "Sample for ImportModelEvaluation", - "file": "aiplatform_v1_generated_model_service_import_model_evaluation_async.py", + "description": "Sample for ListNotebookRuntimes", + "file": "aiplatform_v1_generated_notebook_service_list_notebook_runtimes_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_ImportModelEvaluation_async", + "regionTag": "aiplatform_v1_generated_NotebookService_ListNotebookRuntimes_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -29337,42 +32627,38 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_import_model_evaluation_async.py" + "title": "aiplatform_v1_generated_notebook_service_list_notebook_runtimes_async.py" }, { "canonical": 
true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.import_model_evaluation", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.list_notebook_runtimes", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.ImportModelEvaluation", + "fullName": "google.cloud.aiplatform.v1.NotebookService.ListNotebookRuntimes", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "ImportModelEvaluation" + "shortName": "ListNotebookRuntimes" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ImportModelEvaluationRequest" + "type": "google.cloud.aiplatform_v1.types.ListNotebookRuntimesRequest" }, { "name": "parent", "type": "str" }, - { - "name": "model_evaluation", - "type": "google.cloud.aiplatform_v1.types.ModelEvaluation" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -29386,22 +32672,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.ModelEvaluation", - "shortName": "import_model_evaluation" + "resultType": "google.cloud.aiplatform_v1.services.notebook_service.pagers.ListNotebookRuntimesPager", + "shortName": "list_notebook_runtimes" }, - "description": "Sample for ImportModelEvaluation", - "file": "aiplatform_v1_generated_model_service_import_model_evaluation_sync.py", + "description": "Sample for ListNotebookRuntimes", + "file": "aiplatform_v1_generated_notebook_service_list_notebook_runtimes_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_ImportModelEvaluation_sync", + "regionTag": 
"aiplatform_v1_generated_NotebookService_ListNotebookRuntimes_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -29421,37 +32707,37 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_import_model_evaluation_sync.py" + "title": "aiplatform_v1_generated_notebook_service_list_notebook_runtimes_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_model_evaluation_slices", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient.start_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices", + "fullName": "google.cloud.aiplatform.v1.NotebookService.StartNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "ListModelEvaluationSlices" + "shortName": "StartNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest" + "type": "google.cloud.aiplatform_v1.types.StartNotebookRuntimeRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -29467,22 +32753,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager", - "shortName": "list_model_evaluation_slices" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": 
"start_notebook_runtime" }, - "description": "Sample for ListModelEvaluationSlices", - "file": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_async.py", + "description": "Sample for StartNotebookRuntime", + "file": "aiplatform_v1_generated_notebook_service_start_notebook_runtime_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluationSlices_async", + "regionTag": "aiplatform_v1_generated_NotebookService_StartNotebookRuntime_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -29497,41 +32783,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_async.py" + "title": "aiplatform_v1_generated_notebook_service_start_notebook_runtime_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_model_evaluation_slices", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.start_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluationSlices", + "fullName": "google.cloud.aiplatform.v1.NotebookService.StartNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "ListModelEvaluationSlices" + "shortName": "StartNotebookRuntime" }, "parameters": [ { "name": 
"request", - "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest" + "type": "google.cloud.aiplatform_v1.types.StartNotebookRuntimeRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -29547,22 +32833,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationSlicesPager", - "shortName": "list_model_evaluation_slices" + "resultType": "google.api_core.operation.Operation", + "shortName": "start_notebook_runtime" }, - "description": "Sample for ListModelEvaluationSlices", - "file": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_sync.py", + "description": "Sample for StartNotebookRuntime", + "file": "aiplatform_v1_generated_notebook_service_start_notebook_runtime_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluationSlices_sync", + "regionTag": "aiplatform_v1_generated_NotebookService_StartNotebookRuntime_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -29577,42 +32863,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_list_model_evaluation_slices_sync.py" + "title": "aiplatform_v1_generated_notebook_service_start_notebook_runtime_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_model_evaluations", + "fullName": 
"google.cloud.aiplatform_v1.NotebookServiceAsyncClient.upgrade_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluations", + "fullName": "google.cloud.aiplatform.v1.NotebookService.UpgradeNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "ListModelEvaluations" + "shortName": "UpgradeNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest" + "type": "google.cloud.aiplatform_v1.types.UpgradeNotebookRuntimeRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -29628,22 +32914,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsAsyncPager", - "shortName": "list_model_evaluations" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "upgrade_notebook_runtime" }, - "description": "Sample for ListModelEvaluations", - "file": "aiplatform_v1_generated_model_service_list_model_evaluations_async.py", + "description": "Sample for UpgradeNotebookRuntime", + "file": "aiplatform_v1_generated_notebook_service_upgrade_notebook_runtime_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_ListModelEvaluations_async", + "regionTag": "aiplatform_v1_generated_NotebookService_UpgradeNotebookRuntime_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -29658,41 +32944,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1_generated_model_service_list_model_evaluations_async.py" + "title": "aiplatform_v1_generated_notebook_service_upgrade_notebook_runtime_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_model_evaluations", + "fullName": "google.cloud.aiplatform_v1.NotebookServiceClient.upgrade_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelEvaluations", + "fullName": "google.cloud.aiplatform.v1.NotebookService.UpgradeNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "ListModelEvaluations" + "shortName": "UpgradeNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest" + "type": "google.cloud.aiplatform_v1.types.UpgradeNotebookRuntimeRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -29708,22 +32994,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelEvaluationsPager", - "shortName": "list_model_evaluations" + "resultType": "google.api_core.operation.Operation", + "shortName": "upgrade_notebook_runtime" }, - "description": "Sample for ListModelEvaluations", - "file": "aiplatform_v1_generated_model_service_list_model_evaluations_sync.py", + "description": "Sample for UpgradeNotebookRuntime", + "file": "aiplatform_v1_generated_notebook_service_upgrade_notebook_runtime_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1_generated_ModelService_ListModelEvaluations_sync", + "regionTag": "aiplatform_v1_generated_NotebookService_UpgradeNotebookRuntime_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -29738,42 +33024,50 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_list_model_evaluations_sync.py" + "title": "aiplatform_v1_generated_notebook_service_upgrade_notebook_runtime_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient", + "shortName": "PersistentResourceServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_model_versions", + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient.create_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelVersions", + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService.CreatePersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "ListModelVersions" + "shortName": "CreatePersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListModelVersionsRequest" + "type": "google.cloud.aiplatform_v1.types.CreatePersistentResourceRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "persistent_resource", + "type": 
"google.cloud.aiplatform_v1.types.PersistentResource" + }, + { + "name": "persistent_resource_id", "type": "str" }, { @@ -29789,22 +33083,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelVersionsAsyncPager", - "shortName": "list_model_versions" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_persistent_resource" }, - "description": "Sample for ListModelVersions", - "file": "aiplatform_v1_generated_model_service_list_model_versions_async.py", + "description": "Sample for CreatePersistentResource", + "file": "aiplatform_v1_generated_persistent_resource_service_create_persistent_resource_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_ListModelVersions_async", + "regionTag": "aiplatform_v1_generated_PersistentResourceService_CreatePersistentResource_async", "segments": [ { - "end": 52, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 56, "start": 27, "type": "SHORT" }, @@ -29814,46 +33108,54 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_list_model_versions_async.py" + "title": "aiplatform_v1_generated_persistent_resource_service_create_persistent_resource_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceClient", + "shortName": "PersistentResourceServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_model_versions", + "fullName": 
"google.cloud.aiplatform_v1.PersistentResourceServiceClient.create_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.ListModelVersions", + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService.CreatePersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "ListModelVersions" + "shortName": "CreatePersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListModelVersionsRequest" + "type": "google.cloud.aiplatform_v1.types.CreatePersistentResourceRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "persistent_resource", + "type": "google.cloud.aiplatform_v1.types.PersistentResource" + }, + { + "name": "persistent_resource_id", "type": "str" }, { @@ -29869,22 +33171,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelVersionsPager", - "shortName": "list_model_versions" + "resultType": "google.api_core.operation.Operation", + "shortName": "create_persistent_resource" }, - "description": "Sample for ListModelVersions", - "file": "aiplatform_v1_generated_model_service_list_model_versions_sync.py", + "description": "Sample for CreatePersistentResource", + "file": "aiplatform_v1_generated_persistent_resource_service_create_persistent_resource_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_ListModelVersions_sync", + "regionTag": "aiplatform_v1_generated_PersistentResourceService_CreatePersistentResource_sync", "segments": [ { - "end": 52, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 56, "start": 27, "type": "SHORT" }, @@ -29894,47 +33196,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 
45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_list_model_versions_sync.py" + "title": "aiplatform_v1_generated_persistent_resource_service_create_persistent_resource_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient", + "shortName": "PersistentResourceServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.list_models", + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient.delete_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.ListModels", + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService.DeletePersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "ListModels" + "shortName": "DeletePersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListModelsRequest" + "type": "google.cloud.aiplatform_v1.types.DeletePersistentResourceRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -29950,22 +33252,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsAsyncPager", - "shortName": "list_models" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_persistent_resource" }, - "description": "Sample for ListModels", - "file": 
"aiplatform_v1_generated_model_service_list_models_async.py", + "description": "Sample for DeletePersistentResource", + "file": "aiplatform_v1_generated_persistent_resource_service_delete_persistent_resource_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_ListModels_async", + "regionTag": "aiplatform_v1_generated_PersistentResourceService_DeletePersistentResource_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -29980,41 +33282,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_list_models_async.py" + "title": "aiplatform_v1_generated_persistent_resource_service_delete_persistent_resource_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceClient", + "shortName": "PersistentResourceServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.list_models", + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceClient.delete_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.ListModels", + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService.DeletePersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "ListModels" + "shortName": "DeletePersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.ListModelsRequest" + 
"type": "google.cloud.aiplatform_v1.types.DeletePersistentResourceRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -30030,22 +33332,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.services.model_service.pagers.ListModelsPager", - "shortName": "list_models" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_persistent_resource" }, - "description": "Sample for ListModels", - "file": "aiplatform_v1_generated_model_service_list_models_sync.py", + "description": "Sample for DeletePersistentResource", + "file": "aiplatform_v1_generated_persistent_resource_service_delete_persistent_resource_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_ListModels_sync", + "regionTag": "aiplatform_v1_generated_PersistentResourceService_DeletePersistentResource_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -30060,48 +33362,44 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_list_models_sync.py" + "title": "aiplatform_v1_generated_persistent_resource_service_delete_persistent_resource_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient", + "shortName": "PersistentResourceServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.merge_version_aliases", + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient.get_persistent_resource", "method": { - "fullName": 
"google.cloud.aiplatform.v1.ModelService.MergeVersionAliases", + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService.GetPersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "MergeVersionAliases" + "shortName": "GetPersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.MergeVersionAliasesRequest" + "type": "google.cloud.aiplatform_v1.types.GetPersistentResourceRequest" }, { "name": "name", "type": "str" }, - { - "name": "version_aliases", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -30115,22 +33413,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Model", - "shortName": "merge_version_aliases" + "resultType": "google.cloud.aiplatform_v1.types.PersistentResource", + "shortName": "get_persistent_resource" }, - "description": "Sample for MergeVersionAliases", - "file": "aiplatform_v1_generated_model_service_merge_version_aliases_async.py", + "description": "Sample for GetPersistentResource", + "file": "aiplatform_v1_generated_persistent_resource_service_get_persistent_resource_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_MergeVersionAliases_async", + "regionTag": "aiplatform_v1_generated_PersistentResourceService_GetPersistentResource_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -30140,52 +33438,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 50, + "end": 52, + 
"start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_merge_version_aliases_async.py" + "title": "aiplatform_v1_generated_persistent_resource_service_get_persistent_resource_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceClient", + "shortName": "PersistentResourceServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.merge_version_aliases", + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceClient.get_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.MergeVersionAliases", + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService.GetPersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "MergeVersionAliases" + "shortName": "GetPersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.MergeVersionAliasesRequest" + "type": "google.cloud.aiplatform_v1.types.GetPersistentResourceRequest" }, { "name": "name", "type": "str" }, - { - "name": "version_aliases", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -30199,22 +33493,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Model", - "shortName": "merge_version_aliases" + "resultType": "google.cloud.aiplatform_v1.types.PersistentResource", + "shortName": "get_persistent_resource" }, - "description": "Sample for MergeVersionAliases", - "file": "aiplatform_v1_generated_model_service_merge_version_aliases_sync.py", + "description": "Sample for GetPersistentResource", + "file": 
"aiplatform_v1_generated_persistent_resource_service_get_persistent_resource_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_MergeVersionAliases_sync", + "regionTag": "aiplatform_v1_generated_PersistentResourceService_GetPersistentResource_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -30224,47 +33518,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_merge_version_aliases_sync.py" + "title": "aiplatform_v1_generated_persistent_resource_service_get_persistent_resource_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient", + "shortName": "PersistentResourceServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.update_explanation_dataset", + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient.list_persistent_resources", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.UpdateExplanationDataset", + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService.ListPersistentResources", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "UpdateExplanationDataset" + "shortName": "ListPersistentResources" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.UpdateExplanationDatasetRequest" + "type": "google.cloud.aiplatform_v1.types.ListPersistentResourcesRequest" }, { - "name": "model", + "name": "parent", "type": "str" }, { @@ -30280,22 +33574,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_explanation_dataset" + "resultType": "google.cloud.aiplatform_v1.services.persistent_resource_service.pagers.ListPersistentResourcesAsyncPager", + "shortName": "list_persistent_resources" }, - "description": "Sample for UpdateExplanationDataset", - "file": "aiplatform_v1_generated_model_service_update_explanation_dataset_async.py", + "description": "Sample for ListPersistentResources", + "file": "aiplatform_v1_generated_persistent_resource_service_list_persistent_resources_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_UpdateExplanationDataset_async", + "regionTag": "aiplatform_v1_generated_PersistentResourceService_ListPersistentResources_async", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -30310,41 +33604,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_update_explanation_dataset_async.py" + "title": "aiplatform_v1_generated_persistent_resource_service_list_persistent_resources_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceClient", + "shortName": "PersistentResourceServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.update_explanation_dataset", 
+ "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceClient.list_persistent_resources", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.UpdateExplanationDataset", + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService.ListPersistentResources", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "UpdateExplanationDataset" + "shortName": "ListPersistentResources" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateExplanationDatasetRequest" + "type": "google.cloud.aiplatform_v1.types.ListPersistentResourcesRequest" }, { - "name": "model", + "name": "parent", "type": "str" }, { @@ -30360,22 +33654,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_explanation_dataset" + "resultType": "google.cloud.aiplatform_v1.services.persistent_resource_service.pagers.ListPersistentResourcesPager", + "shortName": "list_persistent_resources" }, - "description": "Sample for UpdateExplanationDataset", - "file": "aiplatform_v1_generated_model_service_update_explanation_dataset_sync.py", + "description": "Sample for ListPersistentResources", + "file": "aiplatform_v1_generated_persistent_resource_service_list_persistent_resources_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_UpdateExplanationDataset_sync", + "regionTag": "aiplatform_v1_generated_PersistentResourceService_ListPersistentResources_sync", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -30390,47 +33684,43 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, 
+ "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_update_explanation_dataset_sync.py" + "title": "aiplatform_v1_generated_persistent_resource_service_list_persistent_resources_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient", + "shortName": "PersistentResourceServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.update_model", + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient.reboot_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.UpdateModel", + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService.RebootPersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "UpdateModel" + "shortName": "RebootPersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UpdateModelRequest" - }, - { - "name": "model", - "type": "google.cloud.aiplatform_v1.types.Model" + "type": "google.cloud.aiplatform_v1.types.RebootPersistentResourceRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -30445,22 +33735,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Model", - "shortName": "update_model" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "reboot_persistent_resource" }, - "description": "Sample for UpdateModel", - "file": "aiplatform_v1_generated_model_service_update_model_async.py", + 
"description": "Sample for RebootPersistentResource", + "file": "aiplatform_v1_generated_persistent_resource_service_reboot_persistent_resource_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_UpdateModel_async", + "regionTag": "aiplatform_v1_generated_PersistentResourceService_RebootPersistentResource_async", "segments": [ { - "end": 54, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 55, "start": 27, "type": "SHORT" }, @@ -30470,51 +33760,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 49, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_update_model_async.py" + "title": "aiplatform_v1_generated_persistent_resource_service_reboot_persistent_resource_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceClient", + "shortName": "PersistentResourceServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.update_model", + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceClient.reboot_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.UpdateModel", + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService.RebootPersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "UpdateModel" + "shortName": "RebootPersistentResource" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1.types.UpdateModelRequest" - }, - { - "name": "model", - "type": "google.cloud.aiplatform_v1.types.Model" + "type": "google.cloud.aiplatform_v1.types.RebootPersistentResourceRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -30529,22 +33815,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1.types.Model", - "shortName": "update_model" + "resultType": "google.api_core.operation.Operation", + "shortName": "reboot_persistent_resource" }, - "description": "Sample for UpdateModel", - "file": "aiplatform_v1_generated_model_service_update_model_sync.py", + "description": "Sample for RebootPersistentResource", + "file": "aiplatform_v1_generated_persistent_resource_service_reboot_persistent_resource_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_UpdateModel_sync", + "regionTag": "aiplatform_v1_generated_PersistentResourceService_RebootPersistentResource_sync", "segments": [ { - "end": 54, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 55, "start": 27, "type": "SHORT" }, @@ -30554,52 +33840,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 49, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_update_model_sync.py" + "title": "aiplatform_v1_generated_persistent_resource_service_reboot_persistent_resource_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient", + "shortName": 
"PersistentResourceServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceAsyncClient.upload_model", + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceAsyncClient.update_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.UploadModel", + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService.UpdatePersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "UploadModel" + "shortName": "UpdatePersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UploadModelRequest" + "type": "google.cloud.aiplatform_v1.types.UpdatePersistentResourceRequest" }, { - "name": "parent", - "type": "str" + "name": "persistent_resource", + "type": "google.cloud.aiplatform_v1.types.PersistentResource" }, { - "name": "model", - "type": "google.cloud.aiplatform_v1.types.Model" + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -30615,21 +33901,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "upload_model" + "shortName": "update_persistent_resource" }, - "description": "Sample for UploadModel", - "file": "aiplatform_v1_generated_model_service_upload_model_async.py", + "description": "Sample for UpdatePersistentResource", + "file": "aiplatform_v1_generated_persistent_resource_service_update_persistent_resource_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_UploadModel_async", + "regionTag": "aiplatform_v1_generated_PersistentResourceService_UpdatePersistentResource_async", "segments": [ { - "end": 59, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 54, "start": 27, "type": "SHORT" }, @@ 
-30639,51 +33925,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 51, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_upload_model_async.py" + "title": "aiplatform_v1_generated_persistent_resource_service_update_persistent_resource_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceClient", + "shortName": "PersistentResourceServiceClient" }, - "fullName": "google.cloud.aiplatform_v1.ModelServiceClient.upload_model", + "fullName": "google.cloud.aiplatform_v1.PersistentResourceServiceClient.update_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1.ModelService.UploadModel", + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService.UpdatePersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "UploadModel" + "shortName": "UpdatePersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1.types.UploadModelRequest" + "type": "google.cloud.aiplatform_v1.types.UpdatePersistentResourceRequest" }, { - "name": "parent", - "type": "str" + "name": "persistent_resource", + "type": "google.cloud.aiplatform_v1.types.PersistentResource" }, { - "name": "model", - "type": "google.cloud.aiplatform_v1.types.Model" + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -30699,21 +33985,21 @@ } ], "resultType": "google.api_core.operation.Operation", - 
"shortName": "upload_model" + "shortName": "update_persistent_resource" }, - "description": "Sample for UploadModel", - "file": "aiplatform_v1_generated_model_service_upload_model_sync.py", + "description": "Sample for UpdatePersistentResource", + "file": "aiplatform_v1_generated_persistent_resource_service_update_persistent_resource_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1_generated_ModelService_UploadModel_sync", + "regionTag": "aiplatform_v1_generated_PersistentResourceService_UpdatePersistentResource_sync", "segments": [ { - "end": 59, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 54, "start": 27, "type": "SHORT" }, @@ -30723,22 +34009,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 51, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1_generated_model_service_upload_model_sync.py" + "title": "aiplatform_v1_generated_persistent_resource_service_update_persistent_resource_sync.py" }, { "canonical": true, diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index 55f758cffe..b4b1341f51 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.46.0" + "version": "1.47.0" }, "snippets": [ { @@ -5127,34 +5127,98 @@ "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.EvaluationServiceAsyncClient", + "shortName": "EvaluationServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.create_feature_online_store", + "fullName": "google.cloud.aiplatform_v1beta1.EvaluationServiceAsyncClient.evaluate_instances", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.CreateFeatureOnlineStore", + "fullName": "google.cloud.aiplatform.v1beta1.EvaluationService.EvaluateInstances", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.EvaluationService", + "shortName": "EvaluationService" }, - "shortName": "CreateFeatureOnlineStore" + "shortName": "EvaluateInstances" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureOnlineStoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.EvaluateInstancesRequest" }, { - "name": "parent", - "type": "str" + "name": "retry", + "type": "google.api_core.retry.Retry" }, { - "name": "feature_online_store", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore" + "name": "timeout", + "type": "float" }, { - "name": "feature_online_store_id", - "type": "str" + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.EvaluateInstancesResponse", + "shortName": "evaluate_instances" + }, + "description": "Sample for EvaluateInstances", + "file": "aiplatform_v1beta1_generated_evaluation_service_evaluate_instances_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_EvaluationService_EvaluateInstances_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": 
"CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_evaluation_service_evaluate_instances_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.EvaluationServiceClient", + "shortName": "EvaluationServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.EvaluationServiceClient.evaluate_instances", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.EvaluationService.EvaluateInstances", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.EvaluationService", + "shortName": "EvaluationService" + }, + "shortName": "EvaluateInstances" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.EvaluateInstancesRequest" }, { "name": "retry", @@ -5169,22 +5233,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_feature_online_store" + "resultType": "google.cloud.aiplatform_v1beta1.types.EvaluateInstancesResponse", + "shortName": "evaluate_instances" }, - "description": "Sample for CreateFeatureOnlineStore", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_online_store_async.py", + "description": "Sample for EvaluateInstances", + "file": "aiplatform_v1beta1_generated_evaluation_service_evaluate_instances_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_CreateFeatureOnlineStore_async", + "regionTag": "aiplatform_v1beta1_generated_EvaluationService_EvaluateInstances_sync", "segments": [ { - "end": 61, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 61, + "end": 51, "start": 27, "type": "SHORT" }, @@ -5194,54 
+5258,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 52, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 62, - "start": 59, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_online_store_async.py" + "title": "aiplatform_v1beta1_generated_evaluation_service_evaluate_instances_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionExecutionServiceAsyncClient", + "shortName": "ExtensionExecutionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.create_feature_online_store", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionExecutionServiceAsyncClient.execute_extension", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.CreateFeatureOnlineStore", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionExecutionService.ExecuteExtension", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionExecutionService", + "shortName": "ExtensionExecutionService" }, - "shortName": "CreateFeatureOnlineStore" + "shortName": "ExecuteExtension" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureOnlineStoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ExecuteExtensionRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { - "name": "feature_online_store", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore" - }, - { - 
"name": "feature_online_store_id", + "name": "operation_id", "type": "str" }, { @@ -5257,22 +5318,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_feature_online_store" + "resultType": "google.cloud.aiplatform_v1beta1.types.ExecuteExtensionResponse", + "shortName": "execute_extension" }, - "description": "Sample for CreateFeatureOnlineStore", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_online_store_sync.py", + "description": "Sample for ExecuteExtension", + "file": "aiplatform_v1beta1_generated_extension_execution_service_execute_extension_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_CreateFeatureOnlineStore_sync", + "regionTag": "aiplatform_v1beta1_generated_ExtensionExecutionService_ExecuteExtension_async", "segments": [ { - "end": 61, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 61, + "end": 52, "start": 27, "type": "SHORT" }, @@ -5282,55 +5343,50 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 52, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 62, - "start": 59, + "end": 53, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_online_store_sync.py" + "title": "aiplatform_v1beta1_generated_extension_execution_service_execute_extension_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionExecutionServiceClient", + "shortName": "ExtensionExecutionServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.create_feature_view", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionExecutionServiceClient.execute_extension", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.CreateFeatureView", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionExecutionService.ExecuteExtension", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionExecutionService", + "shortName": "ExtensionExecutionService" }, - "shortName": "CreateFeatureView" + "shortName": "ExecuteExtension" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureViewRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ExecuteExtensionRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { - "name": "feature_view", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureView" - }, - { - "name": "feature_view_id", + "name": "operation_id", "type": "str" }, { @@ -5346,22 +5402,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_feature_view" + "resultType": "google.cloud.aiplatform_v1beta1.types.ExecuteExtensionResponse", + "shortName": "execute_extension" }, - "description": "Sample for CreateFeatureView", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_view_async.py", + "description": "Sample for ExecuteExtension", + "file": "aiplatform_v1beta1_generated_extension_execution_service_execute_extension_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_CreateFeatureView_async", + "regionTag": "aiplatform_v1beta1_generated_ExtensionExecutionService_ExecuteExtension_sync", "segments": [ { 
- "end": 61, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 61, + "end": 52, "start": 27, "type": "SHORT" }, @@ -5371,55 +5427,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 52, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 62, - "start": 59, + "end": 53, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_view_async.py" + "title": "aiplatform_v1beta1_generated_extension_execution_service_execute_extension_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionExecutionServiceAsyncClient", + "shortName": "ExtensionExecutionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.create_feature_view", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionExecutionServiceAsyncClient.query_extension", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.CreateFeatureView", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionExecutionService.QueryExtension", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionExecutionService", + "shortName": "ExtensionExecutionService" }, - "shortName": "CreateFeatureView" + "shortName": "QueryExtension" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureViewRequest" + "type": "google.cloud.aiplatform_v1beta1.types.QueryExtensionRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { - "name": 
"feature_view", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureView" - }, - { - "name": "feature_view_id", - "type": "str" + "name": "contents", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]" }, { "name": "retry", @@ -5434,22 +5487,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_feature_view" + "resultType": "google.cloud.aiplatform_v1beta1.types.QueryExtensionResponse", + "shortName": "query_extension" }, - "description": "Sample for CreateFeatureView", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_view_sync.py", + "description": "Sample for QueryExtension", + "file": "aiplatform_v1beta1_generated_extension_execution_service_query_extension_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_CreateFeatureView_sync", + "regionTag": "aiplatform_v1beta1_generated_ExtensionExecutionService_QueryExtension_async", "segments": [ { - "end": 61, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 61, + "end": 55, "start": 27, "type": "SHORT" }, @@ -5459,52 +5512,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 52, + "end": 52, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 62, - "start": 59, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_view_sync.py" + "title": "aiplatform_v1beta1_generated_extension_execution_service_query_extension_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.ExtensionExecutionServiceClient", + "shortName": "ExtensionExecutionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.delete_feature_online_store", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionExecutionServiceClient.query_extension", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.DeleteFeatureOnlineStore", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionExecutionService.QueryExtension", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionExecutionService", + "shortName": "ExtensionExecutionService" }, - "shortName": "DeleteFeatureOnlineStore" + "shortName": "QueryExtension" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureOnlineStoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.QueryExtensionRequest" }, { "name": "name", "type": "str" }, { - "name": "force", - "type": "bool" + "name": "contents", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]" }, { "name": "retry", @@ -5519,14 +5571,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_feature_online_store" + "resultType": "google.cloud.aiplatform_v1beta1.types.QueryExtensionResponse", + "shortName": "query_extension" }, - "description": "Sample for DeleteFeatureOnlineStore", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_online_store_async.py", + "description": "Sample for QueryExtension", + "file": "aiplatform_v1beta1_generated_extension_execution_service_query_extension_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_DeleteFeatureOnlineStore_async", + "regionTag": "aiplatform_v1beta1_generated_ExtensionExecutionService_QueryExtension_sync", "segments": [ { "end": 55, @@ -5544,13 +5596,13 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { "end": 52, - "start": 46, + "start": 50, "type": "REQUEST_EXECUTION" }, { @@ -5559,37 +5611,34 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_online_store_async.py" + "title": "aiplatform_v1beta1_generated_extension_execution_service_query_extension_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient", + "shortName": "ExtensionRegistryServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.delete_feature_online_store", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient.delete_extension", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.DeleteFeatureOnlineStore", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService.DeleteExtension", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService", + "shortName": "ExtensionRegistryService" }, - "shortName": "DeleteFeatureOnlineStore" + "shortName": "DeleteExtension" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureOnlineStoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteExtensionRequest" }, 
{ "name": "name", "type": "str" }, - { - "name": "force", - "type": "bool" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -5603,14 +5652,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_feature_online_store" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_extension" }, - "description": "Sample for DeleteFeatureOnlineStore", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_online_store_sync.py", + "description": "Sample for DeleteExtension", + "file": "aiplatform_v1beta1_generated_extension_registry_service_delete_extension_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_DeleteFeatureOnlineStore_sync", + "regionTag": "aiplatform_v1beta1_generated_ExtensionRegistryService_DeleteExtension_async", "segments": [ { "end": 55, @@ -5643,29 +5692,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_online_store_sync.py" + "title": "aiplatform_v1beta1_generated_extension_registry_service_delete_extension_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceClient", + "shortName": "ExtensionRegistryServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.delete_feature_view", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceClient.delete_extension", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.DeleteFeatureView", + "fullName": 
"google.cloud.aiplatform.v1beta1.ExtensionRegistryService.DeleteExtension", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService", + "shortName": "ExtensionRegistryService" }, - "shortName": "DeleteFeatureView" + "shortName": "DeleteExtension" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureViewRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteExtensionRequest" }, { "name": "name", @@ -5684,14 +5732,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_feature_view" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_extension" }, - "description": "Sample for DeleteFeatureView", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_view_async.py", + "description": "Sample for DeleteExtension", + "file": "aiplatform_v1beta1_generated_extension_registry_service_delete_extension_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_DeleteFeatureView_async", + "regionTag": "aiplatform_v1beta1_generated_ExtensionRegistryService_DeleteExtension_sync", "segments": [ { "end": 55, @@ -5724,28 +5772,29 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_view_async.py" + "title": "aiplatform_v1beta1_generated_extension_registry_service_delete_extension_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient", + 
"shortName": "ExtensionRegistryServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.delete_feature_view", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient.get_extension", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.DeleteFeatureView", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService.GetExtension", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService", + "shortName": "ExtensionRegistryService" }, - "shortName": "DeleteFeatureView" + "shortName": "GetExtension" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureViewRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetExtensionRequest" }, { "name": "name", @@ -5764,22 +5813,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_feature_view" + "resultType": "google.cloud.aiplatform_v1beta1.types.Extension", + "shortName": "get_extension" }, - "description": "Sample for DeleteFeatureView", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_view_sync.py", + "description": "Sample for GetExtension", + "file": "aiplatform_v1beta1_generated_extension_registry_service_get_extension_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_DeleteFeatureView_sync", + "regionTag": "aiplatform_v1beta1_generated_ExtensionRegistryService_GetExtension_async", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -5794,39 +5843,38 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, 
"start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_view_sync.py" + "title": "aiplatform_v1beta1_generated_extension_registry_service_get_extension_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceClient", + "shortName": "ExtensionRegistryServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.get_feature_online_store", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceClient.get_extension", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.GetFeatureOnlineStore", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService.GetExtension", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService", + "shortName": "ExtensionRegistryService" }, - "shortName": "GetFeatureOnlineStore" + "shortName": "GetExtension" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureOnlineStoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetExtensionRequest" }, { "name": "name", @@ -5845,14 +5893,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore", - "shortName": "get_feature_online_store" + "resultType": "google.cloud.aiplatform_v1beta1.types.Extension", + "shortName": "get_extension" }, - "description": "Sample for GetFeatureOnlineStore", - "file": 
"aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_online_store_async.py", + "description": "Sample for GetExtension", + "file": "aiplatform_v1beta1_generated_extension_registry_service_get_extension_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_GetFeatureOnlineStore_async", + "regionTag": "aiplatform_v1beta1_generated_ExtensionRegistryService_GetExtension_sync", "segments": [ { "end": 51, @@ -5885,33 +5933,38 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_online_store_async.py" + "title": "aiplatform_v1beta1_generated_extension_registry_service_get_extension_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient", + "shortName": "ExtensionRegistryServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.get_feature_online_store", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient.import_extension", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.GetFeatureOnlineStore", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ImportExtension", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService", + "shortName": "ExtensionRegistryService" }, - "shortName": "GetFeatureOnlineStore" + "shortName": "ImportExtension" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureOnlineStoreRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.ImportExtensionRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "extension", + "type": "google.cloud.aiplatform_v1beta1.types.Extension" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -5925,22 +5978,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore", - "shortName": "get_feature_online_store" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_extension" }, - "description": "Sample for GetFeatureOnlineStore", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_online_store_sync.py", + "description": "Sample for ImportExtension", + "file": "aiplatform_v1beta1_generated_extension_registry_service_import_extension_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_GetFeatureOnlineStore_sync", + "regionTag": "aiplatform_v1beta1_generated_ExtensionRegistryService_ImportExtension_async", "segments": [ { - "end": 51, + "end": 65, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 65, "start": 27, "type": "SHORT" }, @@ -5950,49 +6003,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 55, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 62, + "start": 56, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 66, + "start": 63, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_online_store_sync.py" + "title": "aiplatform_v1beta1_generated_extension_registry_service_import_extension_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceClient", + "shortName": "ExtensionRegistryServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.get_feature_view_sync", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceClient.import_extension", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.GetFeatureViewSync", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ImportExtension", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService", + "shortName": "ExtensionRegistryService" }, - "shortName": "GetFeatureViewSync" + "shortName": "ImportExtension" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureViewSyncRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ImportExtensionRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "extension", + "type": "google.cloud.aiplatform_v1beta1.types.Extension" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -6006,22 +6062,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureViewSync", - "shortName": "get_feature_view_sync" + "resultType": "google.api_core.operation.Operation", + "shortName": "import_extension" }, - "description": "Sample for GetFeatureViewSync", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_sync_async.py", + "description": "Sample for ImportExtension", + "file": "aiplatform_v1beta1_generated_extension_registry_service_import_extension_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_GetFeatureViewSync_async", + "regionTag": 
"aiplatform_v1beta1_generated_ExtensionRegistryService_ImportExtension_sync", "segments": [ { - "end": 51, + "end": 65, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 65, "start": 27, "type": "SHORT" }, @@ -6031,46 +6087,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 55, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 62, + "start": 56, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 66, + "start": 63, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_sync_async.py" + "title": "aiplatform_v1beta1_generated_extension_registry_service_import_extension_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient", + "shortName": "ExtensionRegistryServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.get_feature_view_sync", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient.list_extensions", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.GetFeatureViewSync", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ListExtensions", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService", + "shortName": "ExtensionRegistryService" }, - "shortName": "GetFeatureViewSync" + "shortName": "ListExtensions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureViewSyncRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.ListExtensionsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -6086,22 +6143,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureViewSync", - "shortName": "get_feature_view_sync" + "resultType": "google.cloud.aiplatform_v1beta1.services.extension_registry_service.pagers.ListExtensionsAsyncPager", + "shortName": "list_extensions" }, - "description": "Sample for GetFeatureViewSync", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_sync_sync.py", + "description": "Sample for ListExtensions", + "file": "aiplatform_v1beta1_generated_extension_registry_service_list_extensions_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_GetFeatureViewSync_sync", + "regionTag": "aiplatform_v1beta1_generated_ExtensionRegistryService_ListExtensions_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -6121,37 +6178,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_sync_sync.py" + "title": "aiplatform_v1beta1_generated_extension_registry_service_list_extensions_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceClient", + "shortName": "ExtensionRegistryServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.get_feature_view", + "fullName": 
"google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceClient.list_extensions", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.GetFeatureView", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService.ListExtensions", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService", + "shortName": "ExtensionRegistryService" }, - "shortName": "GetFeatureView" + "shortName": "ListExtensions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureViewRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListExtensionsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -6167,22 +6223,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureView", - "shortName": "get_feature_view" + "resultType": "google.cloud.aiplatform_v1beta1.services.extension_registry_service.pagers.ListExtensionsPager", + "shortName": "list_extensions" }, - "description": "Sample for GetFeatureView", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_async.py", + "description": "Sample for ListExtensions", + "file": "aiplatform_v1beta1_generated_extension_registry_service_list_extensions_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_GetFeatureView_async", + "regionTag": "aiplatform_v1beta1_generated_ExtensionRegistryService_ListExtensions_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -6202,37 +6258,42 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_async.py" + "title": "aiplatform_v1beta1_generated_extension_registry_service_list_extensions_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient", + "shortName": "ExtensionRegistryServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.get_feature_view", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceAsyncClient.update_extension", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.GetFeatureView", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService.UpdateExtension", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService", + "shortName": "ExtensionRegistryService" }, - "shortName": "GetFeatureView" + "shortName": "UpdateExtension" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureViewRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateExtensionRequest" }, { - "name": "name", - "type": "str" + "name": "extension", + "type": "google.cloud.aiplatform_v1beta1.types.Extension" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -6247,22 +6308,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureView", - "shortName": "get_feature_view" + "resultType": "google.cloud.aiplatform_v1beta1.types.Extension", + "shortName": "update_extension" }, - "description": "Sample for GetFeatureView", - 
"file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_sync.py", + "description": "Sample for UpdateExtension", + "file": "aiplatform_v1beta1_generated_extension_registry_service_update_extension_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_GetFeatureView_sync", + "regionTag": "aiplatform_v1beta1_generated_ExtensionRegistryService_UpdateExtension_async", "segments": [ { - "end": 51, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 60, "start": 27, "type": "SHORT" }, @@ -6272,48 +6333,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 57, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_sync.py" + "title": "aiplatform_v1beta1_generated_extension_registry_service_update_extension_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceClient", + "shortName": "ExtensionRegistryServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.list_feature_online_stores", + "fullName": "google.cloud.aiplatform_v1beta1.ExtensionRegistryServiceClient.update_extension", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.ListFeatureOnlineStores", + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService.UpdateExtension", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - 
"shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.ExtensionRegistryService", + "shortName": "ExtensionRegistryService" }, - "shortName": "ListFeatureOnlineStores" + "shortName": "UpdateExtension" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureOnlineStoresRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateExtensionRequest" }, { - "name": "parent", - "type": "str" + "name": "extension", + "type": "google.cloud.aiplatform_v1beta1.types.Extension" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -6328,22 +6392,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.pagers.ListFeatureOnlineStoresAsyncPager", - "shortName": "list_feature_online_stores" + "resultType": "google.cloud.aiplatform_v1beta1.types.Extension", + "shortName": "update_extension" }, - "description": "Sample for ListFeatureOnlineStores", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_online_stores_async.py", + "description": "Sample for UpdateExtension", + "file": "aiplatform_v1beta1_generated_extension_registry_service_update_extension_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_ListFeatureOnlineStores_async", + "regionTag": "aiplatform_v1beta1_generated_ExtensionRegistryService_UpdateExtension_sync", "segments": [ { - "end": 52, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 60, "start": 27, "type": "SHORT" }, @@ -6353,48 +6417,57 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 57, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 61, + "start": 58, "type": 
"RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_online_stores_async.py" + "title": "aiplatform_v1beta1_generated_extension_registry_service_update_extension_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.list_feature_online_stores", + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.create_feature_online_store", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.ListFeatureOnlineStores", + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.CreateFeatureOnlineStore", "service": { "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", "shortName": "FeatureOnlineStoreAdminService" }, - "shortName": "ListFeatureOnlineStores" + "shortName": "CreateFeatureOnlineStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureOnlineStoresRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureOnlineStoreRequest" }, { "name": "parent", "type": "str" }, + { + "name": "feature_online_store", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore" + }, + { + "name": "feature_online_store_id", + "type": "str" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -6408,22 +6481,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.pagers.ListFeatureOnlineStoresPager", - "shortName": "list_feature_online_stores" + 
"resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_feature_online_store" }, - "description": "Sample for ListFeatureOnlineStores", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_online_stores_sync.py", + "description": "Sample for CreateFeatureOnlineStore", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_online_store_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_ListFeatureOnlineStores_sync", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_CreateFeatureOnlineStore_async", "segments": [ { - "end": 52, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 61, "start": 27, "type": "SHORT" }, @@ -6433,49 +6506,56 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_online_stores_sync.py" + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_online_store_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.list_feature_view_syncs", + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.create_feature_online_store", "method": { - 
"fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.ListFeatureViewSyncs", + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.CreateFeatureOnlineStore", "service": { "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", "shortName": "FeatureOnlineStoreAdminService" }, - "shortName": "ListFeatureViewSyncs" + "shortName": "CreateFeatureOnlineStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureViewSyncsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureOnlineStoreRequest" }, { "name": "parent", "type": "str" }, + { + "name": "feature_online_store", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore" + }, + { + "name": "feature_online_store_id", + "type": "str" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -6489,22 +6569,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.pagers.ListFeatureViewSyncsAsyncPager", - "shortName": "list_feature_view_syncs" + "resultType": "google.api_core.operation.Operation", + "shortName": "create_feature_online_store" }, - "description": "Sample for ListFeatureViewSyncs", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_view_syncs_async.py", + "description": "Sample for CreateFeatureOnlineStore", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_online_store_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_ListFeatureViewSyncs_async", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_CreateFeatureOnlineStore_sync", "segments": [ { - "end": 52, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 61, "start": 27, "type": "SHORT" }, @@ -6514,48 +6594,57 @@ "type": 
"CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_view_syncs_async.py" + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_online_store_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.list_feature_view_syncs", + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.create_feature_view", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.ListFeatureViewSyncs", + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.CreateFeatureView", "service": { "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", "shortName": "FeatureOnlineStoreAdminService" }, - "shortName": "ListFeatureViewSyncs" + "shortName": "CreateFeatureView" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureViewSyncsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureViewRequest" }, { "name": "parent", "type": "str" }, + { + "name": "feature_view", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureView" + }, + { + "name": "feature_view_id", + "type": "str" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -6569,22 +6658,22 @@ "type": 
"Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.pagers.ListFeatureViewSyncsPager", - "shortName": "list_feature_view_syncs" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_feature_view" }, - "description": "Sample for ListFeatureViewSyncs", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_view_syncs_sync.py", + "description": "Sample for CreateFeatureView", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_view_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_ListFeatureViewSyncs_sync", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_CreateFeatureView_async", "segments": [ { - "end": 52, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 61, "start": 27, "type": "SHORT" }, @@ -6594,49 +6683,56 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_view_syncs_sync.py" + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_view_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.list_feature_views", + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.create_feature_view", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.ListFeatureViews", + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.CreateFeatureView", "service": { "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", "shortName": "FeatureOnlineStoreAdminService" }, - "shortName": "ListFeatureViews" + "shortName": "CreateFeatureView" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureViewsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureViewRequest" }, { "name": "parent", "type": "str" }, + { + "name": "feature_view", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureView" + }, + { + "name": "feature_view_id", + "type": "str" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -6650,22 +6746,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.pagers.ListFeatureViewsAsyncPager", - "shortName": "list_feature_views" + "resultType": "google.api_core.operation.Operation", + "shortName": "create_feature_view" }, - "description": "Sample for ListFeatureViews", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_views_async.py", + "description": "Sample for CreateFeatureView", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_view_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_ListFeatureViews_async", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_CreateFeatureView_sync", "segments": [ { - "end": 52, + "end": 61, "start": 27, 
"type": "FULL" }, { - "end": 52, + "end": 61, "start": 27, "type": "SHORT" }, @@ -6675,48 +6771,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_views_async.py" + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_create_feature_view_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.list_feature_views", + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.delete_feature_online_store", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.ListFeatureViews", + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.DeleteFeatureOnlineStore", "service": { "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", "shortName": "FeatureOnlineStoreAdminService" }, - "shortName": "ListFeatureViews" + "shortName": "DeleteFeatureOnlineStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureViewsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureOnlineStoreRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, + { + "name": "force", + "type": "bool" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -6730,22 
+6831,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.pagers.ListFeatureViewsPager", - "shortName": "list_feature_views" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_feature_online_store" }, - "description": "Sample for ListFeatureViews", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_views_sync.py", + "description": "Sample for DeleteFeatureOnlineStore", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_online_store_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_ListFeatureViews_sync", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_DeleteFeatureOnlineStore_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -6760,44 +6861,47 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_views_sync.py" + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_online_store_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.sync_feature_view", + "fullName": 
"google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.delete_feature_online_store", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.SyncFeatureView", + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.DeleteFeatureOnlineStore", "service": { "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", "shortName": "FeatureOnlineStoreAdminService" }, - "shortName": "SyncFeatureView" + "shortName": "DeleteFeatureOnlineStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.SyncFeatureViewRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureOnlineStoreRequest" }, { - "name": "feature_view", + "name": "name", "type": "str" }, + { + "name": "force", + "type": "bool" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -6811,22 +6915,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.SyncFeatureViewResponse", - "shortName": "sync_feature_view" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_feature_online_store" }, - "description": "Sample for SyncFeatureView", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_sync_feature_view_async.py", + "description": "Sample for DeleteFeatureOnlineStore", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_online_store_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_SyncFeatureView_async", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_DeleteFeatureOnlineStore_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -6841,41 +6945,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": 
"REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_sync_feature_view_async.py" + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_online_store_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.sync_feature_view", + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.delete_feature_view", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.SyncFeatureView", + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.DeleteFeatureView", "service": { "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", "shortName": "FeatureOnlineStoreAdminService" }, - "shortName": "SyncFeatureView" + "shortName": "DeleteFeatureView" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.SyncFeatureViewRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureViewRequest" }, { - "name": "feature_view", + "name": "name", "type": "str" }, { @@ -6891,22 +6996,4618 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.SyncFeatureViewResponse", - "shortName": "sync_feature_view" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_feature_view" }, - "description": "Sample for SyncFeatureView", - "file": 
"aiplatform_v1beta1_generated_feature_online_store_admin_service_sync_feature_view_sync.py", + "description": "Sample for DeleteFeatureView", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_DeleteFeatureView_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.delete_feature_view", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.DeleteFeatureView", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "DeleteFeatureView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_feature_view" + }, + "description": 
"Sample for DeleteFeatureView", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_DeleteFeatureView_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_delete_feature_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.get_feature_online_store", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.GetFeatureOnlineStore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "GetFeatureOnlineStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureOnlineStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore", + "shortName": "get_feature_online_store" + }, + "description": "Sample for GetFeatureOnlineStore", + 
"file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_online_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_GetFeatureOnlineStore_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_online_store_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.get_feature_online_store", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.GetFeatureOnlineStore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "GetFeatureOnlineStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureOnlineStoreRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore", + "shortName": "get_feature_online_store" + }, + "description": "Sample for GetFeatureOnlineStore", + "file": 
"aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_online_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_GetFeatureOnlineStore_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_online_store_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.get_feature_view_sync", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.GetFeatureViewSync", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "GetFeatureViewSync" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureViewSyncRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureViewSync", + "shortName": "get_feature_view_sync" + }, + "description": "Sample for GetFeatureViewSync", + "file": 
"aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_sync_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_GetFeatureViewSync_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_sync_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.get_feature_view_sync", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.GetFeatureViewSync", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "GetFeatureViewSync" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureViewSyncRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureViewSync", + "shortName": "get_feature_view_sync" + }, + "description": "Sample for GetFeatureViewSync", + "file": 
"aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_sync_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_GetFeatureViewSync_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_sync_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.get_feature_view", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.GetFeatureView", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "GetFeatureView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureView", + "shortName": "get_feature_view" + }, + "description": "Sample for GetFeatureView", + "file": 
"aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_GetFeatureView_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.get_feature_view", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.GetFeatureView", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "GetFeatureView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureViewRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureView", + "shortName": "get_feature_view" + }, + "description": "Sample for GetFeatureView", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_sync.py", + "language": "PYTHON", + 
"origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_GetFeatureView_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_get_feature_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.list_feature_online_stores", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.ListFeatureOnlineStores", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "ListFeatureOnlineStores" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureOnlineStoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.pagers.ListFeatureOnlineStoresAsyncPager", + "shortName": "list_feature_online_stores" + }, + "description": "Sample for ListFeatureOnlineStores", + "file": 
"aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_online_stores_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_ListFeatureOnlineStores_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_online_stores_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.list_feature_online_stores", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.ListFeatureOnlineStores", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "ListFeatureOnlineStores" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureOnlineStoresRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.pagers.ListFeatureOnlineStoresPager", + "shortName": "list_feature_online_stores" + }, + "description": "Sample for 
ListFeatureOnlineStores", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_online_stores_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_ListFeatureOnlineStores_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_online_stores_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.list_feature_view_syncs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.ListFeatureViewSyncs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "ListFeatureViewSyncs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureViewSyncsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.pagers.ListFeatureViewSyncsAsyncPager", + "shortName": 
"list_feature_view_syncs" + }, + "description": "Sample for ListFeatureViewSyncs", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_view_syncs_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_ListFeatureViewSyncs_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_view_syncs_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.list_feature_view_syncs", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.ListFeatureViewSyncs", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "ListFeatureViewSyncs" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureViewSyncsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.pagers.ListFeatureViewSyncsPager", + "shortName": 
"list_feature_view_syncs" + }, + "description": "Sample for ListFeatureViewSyncs", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_view_syncs_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_ListFeatureViewSyncs_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_view_syncs_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.list_feature_views", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.ListFeatureViews", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "ListFeatureViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.pagers.ListFeatureViewsAsyncPager", + 
"shortName": "list_feature_views" + }, + "description": "Sample for ListFeatureViews", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_views_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_ListFeatureViews_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_views_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.list_feature_views", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.ListFeatureViews", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "ListFeatureViews" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureViewsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.feature_online_store_admin_service.pagers.ListFeatureViewsPager", + "shortName": "list_feature_views" + }, + 
"description": "Sample for ListFeatureViews", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_views_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_ListFeatureViews_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_list_feature_views_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.sync_feature_view", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.SyncFeatureView", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "SyncFeatureView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SyncFeatureViewRequest" + }, + { + "name": "feature_view", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.SyncFeatureViewResponse", + "shortName": "sync_feature_view" + }, + "description": "Sample for SyncFeatureView", + "file": 
"aiplatform_v1beta1_generated_feature_online_store_admin_service_sync_feature_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_SyncFeatureView_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_sync_feature_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.sync_feature_view", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.SyncFeatureView", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "SyncFeatureView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SyncFeatureViewRequest" + }, + { + "name": "feature_view", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.SyncFeatureViewResponse", + "shortName": "sync_feature_view" + }, + "description": "Sample for SyncFeatureView", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_sync_feature_view_sync.py", + 
"language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_SyncFeatureView_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_sync_feature_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.update_feature_online_store", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.UpdateFeatureOnlineStore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "UpdateFeatureOnlineStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureOnlineStoreRequest" + }, + { + "name": "feature_online_store", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_feature_online_store" + }, + "description": "Sample for 
UpdateFeatureOnlineStore", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_online_store_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_UpdateFeatureOnlineStore_async", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_online_store_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.update_feature_online_store", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.UpdateFeatureOnlineStore", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "UpdateFeatureOnlineStore" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureOnlineStoreRequest" + }, + { + "name": "feature_online_store", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation.Operation", + "shortName": "update_feature_online_store" + }, + "description": "Sample for UpdateFeatureOnlineStore", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_online_store_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_UpdateFeatureOnlineStore_sync", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_online_store_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", + "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.update_feature_view", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.UpdateFeatureView", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "UpdateFeatureView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureViewRequest" + }, + { + "name": "feature_view", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": 
"float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_feature_view" + }, + "description": "Sample for UpdateFeatureView", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_view_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_UpdateFeatureView_async", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_view_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", + "shortName": "FeatureOnlineStoreAdminServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.update_feature_view", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.UpdateFeatureView", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", + "shortName": "FeatureOnlineStoreAdminService" + }, + "shortName": "UpdateFeatureView" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureViewRequest" + }, + { + "name": "feature_view", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureView" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_feature_view" + }, + "description": "Sample for UpdateFeatureView", + "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_view_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_UpdateFeatureView_sync", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_view_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceAsyncClient", + "shortName": "FeatureOnlineStoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceAsyncClient.fetch_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.FetchFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService", + "shortName": "FeatureOnlineStoreService" + }, + "shortName": "FetchFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.FetchFeatureValuesRequest" + }, + { + "name": "feature_view", + "type": "str" + }, + { + "name": "data_key", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureViewDataKey" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.FetchFeatureValuesResponse", + "shortName": "fetch_feature_values" + }, + "description": "Sample for FetchFeatureValues", + "file": "aiplatform_v1beta1_generated_feature_online_store_service_fetch_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreService_FetchFeatureValues_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_service_fetch_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceClient", + "shortName": "FeatureOnlineStoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceClient.fetch_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.FetchFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService", + "shortName": "FeatureOnlineStoreService" + }, + "shortName": "FetchFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.FetchFeatureValuesRequest" + }, + { + "name": "feature_view", + "type": "str" + }, + { + "name": "data_key", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureViewDataKey" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.FetchFeatureValuesResponse", + "shortName": "fetch_feature_values" + }, + "description": "Sample for FetchFeatureValues", + "file": "aiplatform_v1beta1_generated_feature_online_store_service_fetch_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreService_FetchFeatureValues_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 49, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 50, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_service_fetch_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceAsyncClient", + "shortName": "FeatureOnlineStoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceAsyncClient.search_nearest_entities", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.SearchNearestEntities", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService", + "shortName": "FeatureOnlineStoreService" + }, + "shortName": "SearchNearestEntities" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchNearestEntitiesRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": 
"Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.SearchNearestEntitiesResponse", + "shortName": "search_nearest_entities" + }, + "description": "Sample for SearchNearestEntities", + "file": "aiplatform_v1beta1_generated_feature_online_store_service_search_nearest_entities_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreService_SearchNearestEntities_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_service_search_nearest_entities_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceClient", + "shortName": "FeatureOnlineStoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceClient.search_nearest_entities", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.SearchNearestEntities", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService", + "shortName": "FeatureOnlineStoreService" + }, + "shortName": "SearchNearestEntities" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.SearchNearestEntitiesRequest" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.SearchNearestEntitiesResponse", + 
"shortName": "search_nearest_entities" + }, + "description": "Sample for SearchNearestEntities", + "file": "aiplatform_v1beta1_generated_feature_online_store_service_search_nearest_entities_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreService_SearchNearestEntities_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_service_search_nearest_entities_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceAsyncClient", + "shortName": "FeatureOnlineStoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceAsyncClient.streaming_fetch_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.StreamingFetchFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService", + "shortName": "FeatureOnlineStoreService" + }, + "shortName": "StreamingFetchFeatureValues" + }, + "parameters": [ + { + "name": "requests", + "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamingFetchFeatureValuesRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingFetchFeatureValuesResponse]", + "shortName": "streaming_fetch_feature_values" + }, 
+ "description": "Sample for StreamingFetchFeatureValues", + "file": "aiplatform_v1beta1_generated_feature_online_store_service_streaming_fetch_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreService_StreamingFetchFeatureValues_async", + "segments": [ + { + "end": 62, + "start": 27, + "type": "FULL" + }, + { + "end": 62, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 55, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 56, + "type": "REQUEST_EXECUTION" + }, + { + "end": 63, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_service_streaming_fetch_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceClient", + "shortName": "FeatureOnlineStoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceClient.streaming_fetch_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.StreamingFetchFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService", + "shortName": "FeatureOnlineStoreService" + }, + "shortName": "StreamingFetchFeatureValues" + }, + "parameters": [ + { + "name": "requests", + "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamingFetchFeatureValuesRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingFetchFeatureValuesResponse]", + "shortName": "streaming_fetch_feature_values" + }, + "description": "Sample for 
StreamingFetchFeatureValues", + "file": "aiplatform_v1beta1_generated_feature_online_store_service_streaming_fetch_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreService_StreamingFetchFeatureValues_sync", + "segments": [ + { + "end": 62, + "start": 27, + "type": "FULL" + }, + { + "end": 62, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 55, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 58, + "start": 56, + "type": "REQUEST_EXECUTION" + }, + { + "end": 63, + "start": 59, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_online_store_service_streaming_fetch_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", + "shortName": "FeatureRegistryServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.create_feature_group", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.CreateFeatureGroup", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "CreateFeatureGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureGroupRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "feature_group", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureGroup" + }, + { + "name": "feature_group_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + 
"shortName": "create_feature_group" + }, + "description": "Sample for CreateFeatureGroup", + "file": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_group_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_CreateFeatureGroup_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_group_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", + "shortName": "FeatureRegistryServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.create_feature_group", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.CreateFeatureGroup", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "CreateFeatureGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureGroupRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "feature_group", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureGroup" + }, + { + "name": "feature_group_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": 
"create_feature_group" + }, + "description": "Sample for CreateFeatureGroup", + "file": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_group_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_CreateFeatureGroup_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_group_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", + "shortName": "FeatureRegistryServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.create_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.CreateFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "CreateFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "feature_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_feature" + 
}, + "description": "Sample for CreateFeature", + "file": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_CreateFeature_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", + "shortName": "FeatureRegistryServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.create_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.CreateFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "CreateFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "feature_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_feature" + }, + "description": "Sample for CreateFeature", + "file": 
"aiplatform_v1beta1_generated_feature_registry_service_create_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_CreateFeature_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", + "shortName": "FeatureRegistryServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.delete_feature_group", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.DeleteFeatureGroup", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "DeleteFeatureGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureGroupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_feature_group" + }, + "description": "Sample for DeleteFeatureGroup", + "file": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_group_async.py", + "language": 
"PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_DeleteFeatureGroup_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_group_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", + "shortName": "FeatureRegistryServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.delete_feature_group", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.DeleteFeatureGroup", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "DeleteFeatureGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureGroupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "force", + "type": "bool" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_feature_group" + }, + "description": "Sample for DeleteFeatureGroup", + "file": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_group_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_DeleteFeatureGroup_sync", 
+ "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_group_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", + "shortName": "FeatureRegistryServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.delete_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.DeleteFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "DeleteFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_feature" + }, + "description": "Sample for DeleteFeature", + "file": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_DeleteFeature_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": 
"CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", + "shortName": "FeatureRegistryServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.delete_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.DeleteFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "DeleteFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_feature" + }, + "description": "Sample for DeleteFeature", + "file": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_DeleteFeature_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": 
"RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", + "shortName": "FeatureRegistryServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.get_feature_group", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.GetFeatureGroup", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "GetFeatureGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureGroupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureGroup", + "shortName": "get_feature_group" + }, + "description": "Sample for GetFeatureGroup", + "file": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_group_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_GetFeatureGroup_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_group_async.py" + }, + { + "canonical": true, + 
"clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", + "shortName": "FeatureRegistryServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.get_feature_group", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.GetFeatureGroup", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "GetFeatureGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureGroupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureGroup", + "shortName": "get_feature_group" + }, + "description": "Sample for GetFeatureGroup", + "file": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_group_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_GetFeatureGroup_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_group_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", + "shortName": "FeatureRegistryServiceAsyncClient" + }, + 
"fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.get_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.GetFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "GetFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "get_feature" + }, + "description": "Sample for GetFeature", + "file": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_GetFeature_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", + "shortName": "FeatureRegistryServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.get_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.GetFeature", + "service": { + "fullName": 
"google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "GetFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "get_feature" + }, + "description": "Sample for GetFeature", + "file": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_GetFeature_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", + "shortName": "FeatureRegistryServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.list_feature_groups", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.ListFeatureGroups", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "ListFeatureGroups" + }, + "parameters": [ + { + "name": "request", + "type": 
"google.cloud.aiplatform_v1beta1.types.ListFeatureGroupsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.feature_registry_service.pagers.ListFeatureGroupsAsyncPager", + "shortName": "list_feature_groups" + }, + "description": "Sample for ListFeatureGroups", + "file": "aiplatform_v1beta1_generated_feature_registry_service_list_feature_groups_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_ListFeatureGroups_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_list_feature_groups_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", + "shortName": "FeatureRegistryServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.list_feature_groups", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.ListFeatureGroups", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "ListFeatureGroups" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureGroupsRequest" + }, + { + "name": "parent", + "type": "str" + }, + { 
+ "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.feature_registry_service.pagers.ListFeatureGroupsPager", + "shortName": "list_feature_groups" + }, + "description": "Sample for ListFeatureGroups", + "file": "aiplatform_v1beta1_generated_feature_registry_service_list_feature_groups_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_ListFeatureGroups_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_list_feature_groups_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", + "shortName": "FeatureRegistryServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.list_features", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.ListFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "ListFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { 
+ "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.services.feature_registry_service.pagers.ListFeaturesAsyncPager", + "shortName": "list_features" + }, + "description": "Sample for ListFeatures", + "file": "aiplatform_v1beta1_generated_feature_registry_service_list_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_ListFeatures_async", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_list_features_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", + "shortName": "FeatureRegistryServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.list_features", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.ListFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "ListFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.cloud.aiplatform_v1beta1.services.feature_registry_service.pagers.ListFeaturesPager", + "shortName": "list_features" + }, + "description": "Sample for ListFeatures", + "file": "aiplatform_v1beta1_generated_feature_registry_service_list_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_ListFeatures_sync", + "segments": [ + { + "end": 52, + "start": 27, + "type": "FULL" + }, + { + "end": 52, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 53, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_list_features_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", + "shortName": "FeatureRegistryServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.update_feature_group", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.UpdateFeatureGroup", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "UpdateFeatureGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureGroupRequest" + }, + { + "name": "feature_group", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureGroup" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], 
+ "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_feature_group" + }, + "description": "Sample for UpdateFeatureGroup", + "file": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_group_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_UpdateFeatureGroup_async", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_group_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", + "shortName": "FeatureRegistryServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.update_feature_group", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.UpdateFeatureGroup", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "UpdateFeatureGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureGroupRequest" + }, + { + "name": "feature_group", + "type": "google.cloud.aiplatform_v1beta1.types.FeatureGroup" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation.Operation", + "shortName": "update_feature_group" + }, + "description": "Sample for UpdateFeatureGroup", + "file": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_group_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_UpdateFeatureGroup_sync", + "segments": [ + { + "end": 58, + "start": 27, + "type": "FULL" + }, + { + "end": 58, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 55, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 59, + "start": 56, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_group_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", + "shortName": "FeatureRegistryServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.update_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.UpdateFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "UpdateFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "update_feature" + }, + "description": "Sample for UpdateFeature", + "file": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_UpdateFeature_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", + "shortName": "FeatureRegistryServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.update_feature", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.UpdateFeature", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", + "shortName": "FeatureRegistryService" + }, + "shortName": "UpdateFeature" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest" + }, + { + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "update_feature" + }, + 
"description": "Sample for UpdateFeature", + "file": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_UpdateFeature_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 44, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 45, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient", + "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient.read_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "ReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse", + "shortName": "read_feature_values" + }, + "description": "Sample for ReadFeatureValues", + "file": 
"aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient", + "shortName": "FeaturestoreOnlineServingServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.read_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "ReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse", + "shortName": "read_feature_values" + }, + "description": "Sample for ReadFeatureValues", + "file": 
"aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient", + "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient.streaming_read_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.StreamingReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "StreamingReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]", + "shortName": "streaming_read_feature_values" + }, + "description": "Sample for 
StreamingReadFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient", + "shortName": "FeaturestoreOnlineServingServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.streaming_read_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.StreamingReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "StreamingReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]", + "shortName": 
"streaming_read_feature_values" + }, + "description": "Sample for StreamingReadFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync", + "segments": [ + { + "end": 57, + "start": 27, + "type": "FULL" + }, + { + "end": 57, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 58, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient", + "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient.write_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.WriteFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "WriteFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.WriteFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "payloads", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.WriteFeatureValuesPayload]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": 
"metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteFeatureValuesResponse", + "shortName": "write_feature_values" + }, + "description": "Sample for WriteFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_write_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_WriteFeatureValues_async", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_write_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient", + "shortName": "FeaturestoreOnlineServingServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.write_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.WriteFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", + "shortName": "FeaturestoreOnlineServingService" + }, + "shortName": "WriteFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.WriteFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" + }, + { + "name": "payloads", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.WriteFeatureValuesPayload]" + }, + { + "name": "retry", + "type": 
"google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteFeatureValuesResponse", + "shortName": "write_feature_values" + }, + "description": "Sample for WriteFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_write_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_WriteFeatureValues_sync", + "segments": [ + { + "end": 55, + "start": 27, + "type": "FULL" + }, + { + "end": 55, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 56, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_write_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.batch_create_features", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "BatchCreateFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]" + }, + { + "name": 
"retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_create_features" + }, + "description": "Sample for BatchCreateFeatures", + "file": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchCreateFeatures_async", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.batch_create_features", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "BatchCreateFeatures" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "requests", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + 
"name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_create_features" + }, + "description": "Sample for BatchCreateFeatures", + "file": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchCreateFeatures_sync", + "segments": [ + { + "end": 60, + "start": 27, + "type": "FULL" + }, + { + "end": 60, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 50, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 57, + "start": 51, + "type": "REQUEST_EXECUTION" + }, + { + "end": 61, + "start": 58, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.batch_read_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "BatchReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest" + }, + { + "name": "featurestore", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": 
"google.api_core.operation_async.AsyncOperation", + "shortName": "batch_read_feature_values" + }, + "description": "Sample for BatchReadFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_async", + "segments": [ + { + "end": 68, + "start": 27, + "type": "FULL" + }, + { + "end": 68, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 58, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 65, + "start": 59, + "type": "REQUEST_EXECUTION" + }, + { + "end": 69, + "start": 66, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.batch_read_feature_values", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "BatchReadFeatureValues" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest" + }, + { + "name": "featurestore", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_read_feature_values" + }, + "description": "Sample for BatchReadFeatureValues", + 
"file": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_sync", + "segments": [ + { + "end": 68, + "start": 27, + "type": "FULL" + }, + { + "end": 68, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 58, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 65, + "start": 59, + "type": "REQUEST_EXECUTION" + }, + { + "end": 69, + "start": 66, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.create_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "CreateEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + }, + { + "name": "entity_type_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_entity_type" + }, + "description": "Sample for CreateEntityType", + "file": 
"aiplatform_v1beta1_generated_featurestore_service_create_entity_type_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_SyncFeatureView_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateEntityType_async", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -6916,52 +11617,144 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_sync_feature_view_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.create_entity_type", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" + }, + "shortName": "CreateEntityType" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "entity_type", + "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + }, + { + "name": "entity_type_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, 
str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_entity_type" + }, + "description": "Sample for CreateEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateEntityType_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.update_feature_online_store", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.create_feature", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.UpdateFeatureOnlineStore", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "UpdateFeatureOnlineStore" + "shortName": "CreateFeature" }, "parameters": [ { 
"name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureOnlineStoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest" }, { - "name": "feature_online_store", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "feature_id", + "type": "str" }, { "name": "retry", @@ -6977,21 +11770,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_feature_online_store" + "shortName": "create_feature" }, - "description": "Sample for UpdateFeatureOnlineStore", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_online_store_async.py", + "description": "Sample for CreateFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_create_feature_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_UpdateFeatureOnlineStore_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeature_async", "segments": [ { - "end": 59, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 56, "start": 27, "type": "SHORT" }, @@ -7001,51 +11794,55 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_online_store_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_create_feature_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.update_feature_online_store", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.create_feature", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.UpdateFeatureOnlineStore", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "UpdateFeatureOnlineStore" + "shortName": "CreateFeature" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureOnlineStoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest" }, { - "name": "feature_online_store", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureOnlineStore" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "feature_id", + "type": "str" }, { "name": "retry", @@ -7061,21 +11858,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "update_feature_online_store" + "shortName": "create_feature" }, - "description": "Sample for UpdateFeatureOnlineStore", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_online_store_sync.py", + "description": "Sample for CreateFeature", + "file": 
"aiplatform_v1beta1_generated_featurestore_service_create_feature_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_UpdateFeatureOnlineStore_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeature_sync", "segments": [ { - "end": 59, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 56, "start": 27, "type": "SHORT" }, @@ -7085,52 +11882,56 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_online_store_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_create_feature_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient", - "shortName": "FeatureOnlineStoreAdminServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceAsyncClient.update_feature_view", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.create_featurestore", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.UpdateFeatureView", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": 
"UpdateFeatureView" + "shortName": "CreateFeaturestore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureViewRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest" }, { - "name": "feature_view", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureView" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "featurestore", + "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" + }, + { + "name": "featurestore_id", + "type": "str" }, { "name": "retry", @@ -7146,21 +11947,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_feature_view" + "shortName": "create_featurestore" }, - "description": "Sample for UpdateFeatureView", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_view_async.py", + "description": "Sample for CreateFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_UpdateFeatureView_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeaturestore_async", "segments": [ { - "end": 59, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 56, "start": 27, "type": "SHORT" }, @@ -7170,51 +11971,55 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_view_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_async.py" }, { 
"canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient", - "shortName": "FeatureOnlineStoreAdminServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreAdminServiceClient.update_feature_view", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.create_featurestore", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService.UpdateFeatureView", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreAdminService", - "shortName": "FeatureOnlineStoreAdminService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "UpdateFeatureView" + "shortName": "CreateFeaturestore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureViewRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest" }, { - "name": "feature_view", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureView" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "featurestore", + "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" + }, + { + "name": "featurestore_id", + "type": "str" }, { "name": "retry", @@ -7230,21 +12035,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "update_feature_view" + "shortName": "create_featurestore" }, - "description": "Sample for UpdateFeatureView", - "file": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_view_sync.py", + "description": "Sample for CreateFeaturestore", + "file": 
"aiplatform_v1beta1_generated_featurestore_service_create_featurestore_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreAdminService_UpdateFeatureView_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeaturestore_sync", "segments": [ { - "end": 59, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 56, "start": 27, "type": "SHORT" }, @@ -7254,52 +12059,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_admin_service_update_feature_view_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceAsyncClient", - "shortName": "FeatureOnlineStoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceAsyncClient.fetch_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_entity_type", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.FetchFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteEntityType", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService", - "shortName": "FeatureOnlineStoreService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "FetchFeatureValues" + "shortName": 
"DeleteEntityType" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.FetchFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest" }, { - "name": "feature_view", + "name": "name", "type": "str" }, { - "name": "data_key", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureViewDataKey" + "name": "force", + "type": "bool" }, { "name": "retry", @@ -7314,22 +12119,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.FetchFeatureValuesResponse", - "shortName": "fetch_feature_values" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_entity_type" }, - "description": "Sample for FetchFeatureValues", - "file": "aiplatform_v1beta1_generated_feature_online_store_service_fetch_feature_values_async.py", + "description": "Sample for DeleteEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreService_FetchFeatureValues_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteEntityType_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -7339,51 +12144,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 50, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_service_fetch_feature_values_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceClient", - "shortName": "FeatureOnlineStoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceClient.fetch_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_entity_type", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.FetchFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteEntityType", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService", - "shortName": "FeatureOnlineStoreService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "FetchFeatureValues" + "shortName": "DeleteEntityType" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.FetchFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest" }, { - "name": "feature_view", + "name": "name", "type": "str" }, { - "name": "data_key", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureViewDataKey" + "name": "force", + "type": "bool" }, { "name": "retry", @@ -7398,22 +12203,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.FetchFeatureValuesResponse", - "shortName": "fetch_feature_values" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_entity_type" }, - "description": "Sample for FetchFeatureValues", - "file": "aiplatform_v1beta1_generated_feature_online_store_service_fetch_feature_values_sync.py", + "description": "Sample for DeleteEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_FeatureOnlineStoreService_FetchFeatureValues_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteEntityType_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -7423,44 +12228,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 50, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_service_fetch_feature_values_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceAsyncClient", - "shortName": "FeatureOnlineStoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceAsyncClient.search_nearest_entities", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_feature_values", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.SearchNearestEntities", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeatureValues", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService", - "shortName": "FeatureOnlineStoreService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "SearchNearestEntities" + "shortName": "DeleteFeatureValues" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.SearchNearestEntitiesRequest" + 
"type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" }, { "name": "retry", @@ -7475,22 +12284,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.SearchNearestEntitiesResponse", - "shortName": "search_nearest_entities" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_feature_values" }, - "description": "Sample for SearchNearestEntities", - "file": "aiplatform_v1beta1_generated_feature_online_store_service_search_nearest_entities_async.py", + "description": "Sample for DeleteFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_values_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreService_SearchNearestEntities_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeatureValues_async", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -7505,38 +12314,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 56, "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_service_search_nearest_entities_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_values_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceClient", - "shortName": "FeatureOnlineStoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceClient.search_nearest_entities", + "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_feature_values", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.SearchNearestEntities", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeatureValues", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService", - "shortName": "FeatureOnlineStoreService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "SearchNearestEntities" + "shortName": "DeleteFeatureValues" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.SearchNearestEntitiesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureValuesRequest" + }, + { + "name": "entity_type", + "type": "str" }, { "name": "retry", @@ -7551,22 +12364,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.SearchNearestEntitiesResponse", - "shortName": "search_nearest_entities" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_feature_values" }, - "description": "Sample for SearchNearestEntities", - "file": "aiplatform_v1beta1_generated_feature_online_store_service_search_nearest_entities_sync.py", + "description": "Sample for DeleteFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_values_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreService_SearchNearestEntities_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeatureValues_sync", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -7581,39 +12394,43 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 56, "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + 
"end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_service_search_nearest_entities_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_values_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceAsyncClient", - "shortName": "FeatureOnlineStoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceAsyncClient.streaming_fetch_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_feature", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.StreamingFetchFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService", - "shortName": "FeatureOnlineStoreService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "StreamingFetchFeatureValues" + "shortName": "DeleteFeature" }, "parameters": [ { - "name": "requests", - "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamingFetchFeatureValuesRequest]" + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -7628,22 +12445,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingFetchFeatureValuesResponse]", - "shortName": "streaming_fetch_feature_values" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_feature" }, - "description": "Sample for StreamingFetchFeatureValues", - 
"file": "aiplatform_v1beta1_generated_feature_online_store_service_streaming_fetch_feature_values_async.py", + "description": "Sample for DeleteFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreService_StreamingFetchFeatureValues_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeature_async", "segments": [ { - "end": 62, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 62, + "end": 55, "start": 27, "type": "SHORT" }, @@ -7653,43 +12470,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 55, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 56, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 63, - "start": 59, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_service_streaming_fetch_feature_values_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceClient", - "shortName": "FeatureOnlineStoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureOnlineStoreServiceClient.streaming_fetch_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_feature", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService.StreamingFetchFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureOnlineStoreService", - "shortName": "FeatureOnlineStoreService" + "fullName": 
"google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "StreamingFetchFeatureValues" + "shortName": "DeleteFeature" }, "parameters": [ { - "name": "requests", - "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamingFetchFeatureValuesRequest]" + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -7704,22 +12525,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingFetchFeatureValuesResponse]", - "shortName": "streaming_fetch_feature_values" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_feature" }, - "description": "Sample for StreamingFetchFeatureValues", - "file": "aiplatform_v1beta1_generated_feature_online_store_service_streaming_fetch_feature_values_sync.py", + "description": "Sample for DeleteFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureOnlineStoreService_StreamingFetchFeatureValues_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeature_sync", "segments": [ { - "end": 62, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 62, + "end": 55, "start": 27, "type": "SHORT" }, @@ -7729,56 +12550,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 55, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 56, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 63, - "start": 59, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_online_store_service_streaming_fetch_feature_values_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_sync.py" }, { "canonical": true, "clientMethod": 
{ "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", - "shortName": "FeatureRegistryServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.create_feature_group", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_featurestore", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.CreateFeatureGroup", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "CreateFeatureGroup" + "shortName": "DeleteFeaturestore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureGroupRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { - "name": "feature_group", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureGroup" - }, - { - "name": "feature_group_id", - "type": "str" + "name": "force", + "type": "bool" }, { "name": "retry", @@ -7794,21 +12611,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_feature_group" + "shortName": "delete_featurestore" }, - "description": "Sample for CreateFeatureGroup", - "file": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_group_async.py", + "description": "Sample for DeleteFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_FeatureRegistryService_CreateFeatureGroup_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeaturestore_async", "segments": [ { - "end": 60, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 55, "start": 27, "type": "SHORT" }, @@ -7818,55 +12635,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 51, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_group_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", - "shortName": "FeatureRegistryServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.create_feature_group", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_featurestore", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.CreateFeatureGroup", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "CreateFeatureGroup" + "shortName": "DeleteFeaturestore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureGroupRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest" }, { - 
"name": "parent", + "name": "name", "type": "str" }, { - "name": "feature_group", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureGroup" - }, - { - "name": "feature_group_id", - "type": "str" + "name": "force", + "type": "bool" }, { "name": "retry", @@ -7882,21 +12695,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "create_feature_group" + "shortName": "delete_featurestore" }, - "description": "Sample for CreateFeatureGroup", - "file": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_group_sync.py", + "description": "Sample for DeleteFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_CreateFeatureGroup_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeaturestore_sync", "segments": [ { - "end": 60, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 55, "start": 27, "type": "SHORT" }, @@ -7906,55 +12719,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 51, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_group_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", - "shortName": "FeatureRegistryServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.create_feature", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.export_feature_values", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.CreateFeature", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "CreateFeature" + "shortName": "ExportFeatureValues" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "feature", - "type": "google.cloud.aiplatform_v1beta1.types.Feature" + "type": "google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest" }, { - "name": "feature_id", + "name": "entity_type", "type": "str" }, { @@ -7971,21 +12776,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_feature" + "shortName": "export_feature_values" }, - "description": "Sample for CreateFeature", - "file": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_async.py", + "description": "Sample for ExportFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_CreateFeature_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ExportFeatureValues_async", "segments": [ { - "end": 56, + "end": 63, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 63, "start": 27, "type": "SHORT" }, @@ -7995,54 +12800,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 53, "start": 41, "type": 
"REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 60, + "start": 54, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 64, + "start": 61, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", - "shortName": "FeatureRegistryServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.create_feature", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.export_feature_values", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.CreateFeature", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "CreateFeature" + "shortName": "ExportFeatureValues" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "feature", - "type": "google.cloud.aiplatform_v1beta1.types.Feature" + "type": "google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest" }, { - "name": "feature_id", + "name": "entity_type", "type": "str" }, { @@ -8059,21 +12856,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "create_feature" + "shortName": "export_feature_values" }, - "description": "Sample for CreateFeature", - "file": 
"aiplatform_v1beta1_generated_feature_registry_service_create_feature_sync.py", + "description": "Sample for ExportFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_CreateFeature_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ExportFeatureValues_sync", "segments": [ { - "end": 56, + "end": 63, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 63, "start": 27, "type": "SHORT" }, @@ -8083,53 +12880,49 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 53, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 60, + "start": 54, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 64, + "start": 61, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_create_feature_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", - "shortName": "FeatureRegistryServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.delete_feature_group", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.get_entity_type", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.DeleteFeatureGroup", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": 
"google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "DeleteFeatureGroup" + "shortName": "GetEntityType" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureGroupRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest" }, { "name": "name", "type": "str" }, - { - "name": "force", - "type": "bool" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -8143,22 +12936,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_feature_group" + "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", + "shortName": "get_entity_type" }, - "description": "Sample for DeleteFeatureGroup", - "file": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_group_async.py", + "description": "Sample for GetEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_DeleteFeatureGroup_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetEntityType_async", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -8173,47 +12966,43 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_group_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", - "shortName": "FeatureRegistryServiceClient" + 
"fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.delete_feature_group", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.get_entity_type", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.DeleteFeatureGroup", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "DeleteFeatureGroup" + "shortName": "GetEntityType" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureGroupRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest" }, { "name": "name", "type": "str" }, - { - "name": "force", - "type": "bool" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -8227,22 +13016,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_feature_group" + "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", + "shortName": "get_entity_type" }, - "description": "Sample for DeleteFeatureGroup", - "file": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_group_sync.py", + "description": "Sample for GetEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_DeleteFeatureGroup_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetEntityType_sync", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, 
"start": 27, "type": "SHORT" }, @@ -8257,39 +13046,39 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_group_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", - "shortName": "FeatureRegistryServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.delete_feature", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.get_feature", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.DeleteFeature", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "DeleteFeature" + "shortName": "GetFeature" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureRequest" }, { "name": "name", @@ -8308,22 +13097,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_feature" + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "get_feature" }, - "description": "Sample for DeleteFeature", - "file": 
"aiplatform_v1beta1_generated_feature_registry_service_delete_feature_async.py", + "description": "Sample for GetFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_get_feature_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_DeleteFeature_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeature_async", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -8338,38 +13127,38 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_get_feature_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", - "shortName": "FeatureRegistryServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.delete_feature", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.get_feature", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.DeleteFeature", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "DeleteFeature" + "shortName": "GetFeature" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureRequest" }, { "name": "name", @@ -8388,22 +13177,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_feature" + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "get_feature" }, - "description": "Sample for DeleteFeature", - "file": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_sync.py", + "description": "Sample for GetFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_get_feature_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_DeleteFeature_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeature_sync", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -8418,39 +13207,39 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_delete_feature_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_get_feature_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", - "shortName": "FeatureRegistryServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.get_feature_group", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.get_featurestore", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.FeatureRegistryService.GetFeatureGroup", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "GetFeatureGroup" + "shortName": "GetFeaturestore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureGroupRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest" }, { "name": "name", @@ -8469,14 +13258,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureGroup", - "shortName": "get_feature_group" + "resultType": "google.cloud.aiplatform_v1beta1.types.Featurestore", + "shortName": "get_featurestore" }, - "description": "Sample for GetFeatureGroup", - "file": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_group_async.py", + "description": "Sample for GetFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_GetFeatureGroup_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeaturestore_async", "segments": [ { "end": 51, @@ -8509,28 +13298,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_group_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", - "shortName": "FeatureRegistryServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": 
"FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.get_feature_group", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.get_featurestore", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.GetFeatureGroup", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "GetFeatureGroup" + "shortName": "GetFeaturestore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureGroupRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest" }, { "name": "name", @@ -8549,14 +13338,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.FeatureGroup", - "shortName": "get_feature_group" + "resultType": "google.cloud.aiplatform_v1beta1.types.Featurestore", + "shortName": "get_featurestore" }, - "description": "Sample for GetFeatureGroup", - "file": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_group_sync.py", + "description": "Sample for GetFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_GetFeatureGroup_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeaturestore_sync", "segments": [ { "end": 51, @@ -8589,32 +13378,32 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_group_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_sync.py" }, { "canonical": true, "clientMethod": { 
"async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", - "shortName": "FeatureRegistryServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.get_feature", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.import_feature_values", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.GetFeature", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "GetFeature" + "shortName": "ImportFeatureValues" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest" }, { - "name": "name", + "name": "entity_type", "type": "str" }, { @@ -8630,22 +13419,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", - "shortName": "get_feature" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_feature_values" }, - "description": "Sample for GetFeature", - "file": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_async.py", + "description": "Sample for ImportFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_GetFeature_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ImportFeatureValues_async", 
"segments": [ { - "end": 51, + "end": 64, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 64, "start": 27, "type": "SHORT" }, @@ -8655,46 +13444,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 61, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 65, + "start": 62, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", - "shortName": "FeatureRegistryServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.get_feature", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.import_feature_values", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.GetFeature", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "GetFeature" + "shortName": "ImportFeatureValues" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest" }, { - "name": "name", + "name": "entity_type", "type": "str" }, { @@ -8710,22 +13499,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", - "shortName": 
"get_feature" + "resultType": "google.api_core.operation.Operation", + "shortName": "import_feature_values" }, - "description": "Sample for GetFeature", - "file": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_sync.py", + "description": "Sample for ImportFeatureValues", + "file": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_GetFeature_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ImportFeatureValues_sync", "segments": [ { - "end": 51, + "end": 64, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 64, "start": 27, "type": "SHORT" }, @@ -8735,44 +13524,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 61, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 65, + "start": 62, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_get_feature_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", - "shortName": "FeatureRegistryServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.list_feature_groups", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.list_entity_types", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.ListFeatureGroups", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes", "service": { - "fullName": 
"google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "ListFeatureGroups" + "shortName": "ListEntityTypes" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureGroupsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest" }, { "name": "parent", @@ -8791,14 +13580,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.feature_registry_service.pagers.ListFeatureGroupsAsyncPager", - "shortName": "list_feature_groups" + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesAsyncPager", + "shortName": "list_entity_types" }, - "description": "Sample for ListFeatureGroups", - "file": "aiplatform_v1beta1_generated_feature_registry_service_list_feature_groups_async.py", + "description": "Sample for ListEntityTypes", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_ListFeatureGroups_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListEntityTypes_async", "segments": [ { "end": 52, @@ -8831,28 +13620,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_list_feature_groups_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", - "shortName": "FeatureRegistryServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.list_feature_groups", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.list_entity_types", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.ListFeatureGroups", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "ListFeatureGroups" + "shortName": "ListEntityTypes" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListFeatureGroupsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest" }, { "name": "parent", @@ -8871,14 +13660,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.feature_registry_service.pagers.ListFeatureGroupsPager", - "shortName": "list_feature_groups" + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesPager", + "shortName": "list_entity_types" }, - "description": "Sample for ListFeatureGroups", - "file": "aiplatform_v1beta1_generated_feature_registry_service_list_feature_groups_sync.py", + "description": "Sample for ListEntityTypes", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_ListFeatureGroups_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListEntityTypes_sync", "segments": [ { "end": 52, @@ -8911,22 +13700,22 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_list_feature_groups_sync.py" + "title": 
"aiplatform_v1beta1_generated_featurestore_service_list_entity_types_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", - "shortName": "FeatureRegistryServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.list_features", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.list_features", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.ListFeatures", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, "shortName": "ListFeatures" }, @@ -8952,14 +13741,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.feature_registry_service.pagers.ListFeaturesAsyncPager", + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesAsyncPager", "shortName": "list_features" }, "description": "Sample for ListFeatures", - "file": "aiplatform_v1beta1_generated_feature_registry_service_list_features_async.py", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_features_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_ListFeatures_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_async", "segments": [ { "end": 52, @@ -8992,21 +13781,21 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_list_features_async.py" + "title": 
"aiplatform_v1beta1_generated_featurestore_service_list_features_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", - "shortName": "FeatureRegistryServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.list_features", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.list_features", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.ListFeatures", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, "shortName": "ListFeatures" }, @@ -9032,14 +13821,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.feature_registry_service.pagers.ListFeaturesPager", + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesPager", "shortName": "list_features" }, "description": "Sample for ListFeatures", - "file": "aiplatform_v1beta1_generated_feature_registry_service_list_features_sync.py", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_features_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_ListFeatures_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_sync", "segments": [ { "end": 52, @@ -9072,37 +13861,33 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_list_features_sync.py" + "title": 
"aiplatform_v1beta1_generated_featurestore_service_list_features_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", - "shortName": "FeatureRegistryServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.update_feature_group", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.list_featurestores", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.UpdateFeatureGroup", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "UpdateFeatureGroup" + "shortName": "ListFeaturestores" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureGroupRequest" - }, - { - "name": "feature_group", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureGroup" + "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "parent", + "type": "str" }, { "name": "retry", @@ -9117,22 +13902,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_feature_group" + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager", + "shortName": "list_featurestores" }, - "description": "Sample for UpdateFeatureGroup", - "file": 
"aiplatform_v1beta1_generated_feature_registry_service_update_feature_group_async.py", + "description": "Sample for ListFeaturestores", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_UpdateFeatureGroup_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeaturestores_async", "segments": [ { - "end": 58, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 52, "start": 27, "type": "SHORT" }, @@ -9142,51 +13927,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 49, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_group_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", - "shortName": "FeatureRegistryServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.update_feature_group", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.list_featurestores", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.UpdateFeatureGroup", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": 
"FeaturestoreService" }, - "shortName": "UpdateFeatureGroup" + "shortName": "ListFeaturestores" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureGroupRequest" - }, - { - "name": "feature_group", - "type": "google.cloud.aiplatform_v1beta1.types.FeatureGroup" + "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "parent", + "type": "str" }, { "name": "retry", @@ -9201,22 +13982,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_feature_group" + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresPager", + "shortName": "list_featurestores" }, - "description": "Sample for UpdateFeatureGroup", - "file": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_group_sync.py", + "description": "Sample for ListFeaturestores", + "file": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_UpdateFeatureGroup_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeaturestores_sync", "segments": [ { - "end": 58, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 52, "start": 27, "type": "SHORT" }, @@ -9226,52 +14007,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 49, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_group_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_sync.py" }, { 
"canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient", - "shortName": "FeatureRegistryServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceAsyncClient.update_feature", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.search_features", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.UpdateFeature", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "UpdateFeature" + "shortName": "SearchFeatures" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest" + "type": "google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest" }, { - "name": "feature", - "type": "google.cloud.aiplatform_v1beta1.types.Feature" + "name": "location", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "query", + "type": "str" }, { "name": "retry", @@ -9286,22 +14067,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_feature" + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesAsyncPager", + "shortName": "search_features" }, - "description": "Sample for UpdateFeature", - "file": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_async.py", + "description": "Sample for SearchFeatures", + "file": 
"aiplatform_v1beta1_generated_featurestore_service_search_features_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_UpdateFeature_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_SearchFeatures_async", "segments": [ { - "end": 54, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 52, "start": 27, "type": "SHORT" }, @@ -9311,51 +14092,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 45, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_search_features_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient", - "shortName": "FeatureRegistryServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeatureRegistryServiceClient.update_feature", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.search_features", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService.UpdateFeature", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeatureRegistryService", - "shortName": "FeatureRegistryService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "UpdateFeature" + "shortName": "SearchFeatures" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest" + "type": "google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest" }, { - "name": "feature", - "type": "google.cloud.aiplatform_v1beta1.types.Feature" + "name": "location", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "query", + "type": "str" }, { "name": "retry", @@ -9370,22 +14151,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_feature" + "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesPager", + "shortName": "search_features" }, - "description": "Sample for UpdateFeature", - "file": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_sync.py", + "description": "Sample for SearchFeatures", + "file": "aiplatform_v1beta1_generated_featurestore_service_search_features_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeatureRegistryService_UpdateFeature_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_SearchFeatures_sync", "segments": [ { - "end": 54, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 52, "start": 27, "type": "SHORT" }, @@ -9395,48 +14176,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 45, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_feature_registry_service_update_feature_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_search_features_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient", - "shortName": 
"FeaturestoreOnlineServingServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient.read_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.update_entity_type", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", - "shortName": "FeaturestoreOnlineServingService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "ReadFeatureValues" + "shortName": "UpdateEntityType" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest" }, { "name": "entity_type", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -9451,22 +14236,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse", - "shortName": "read_feature_values" + "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", + "shortName": "update_entity_type" }, - "description": "Sample for ReadFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_async.py", + "description": "Sample for UpdateEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateEntityType_async", "segments": [ { - "end": 56, + "end": 50, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 50, "start": 27, "type": "SHORT" }, @@ -9476,47 +14261,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 47, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 51, + "start": 48, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient", - "shortName": "FeaturestoreOnlineServingServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.read_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.update_entity_type", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.ReadFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", - "shortName": "FeaturestoreOnlineServingService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "ReadFeatureValues" + "shortName": "UpdateEntityType" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest" }, { "name": "entity_type", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -9531,22 +14320,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse", - "shortName": "read_feature_values" + "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", + "shortName": "update_entity_type" }, - "description": "Sample for ReadFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_sync.py", + "description": "Sample for UpdateEntityType", + "file": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_ReadFeatureValues_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateEntityType_sync", "segments": [ { - "end": 56, + "end": 50, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 50, "start": 27, "type": "SHORT" }, @@ -9556,48 +14345,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 47, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 51, + "start": 48, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_read_feature_values_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient", - "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient.streaming_read_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.update_feature", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.StreamingReadFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", - "shortName": "FeaturestoreOnlineServingService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "StreamingReadFeatureValues" + "shortName": "UpdateFeature" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest" }, { - "name": "entity_type", - "type": "str" + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -9612,22 +14405,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]", - "shortName": "streaming_read_feature_values" + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "update_feature" }, - "description": "Sample for StreamingReadFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py", + "description": "Sample for UpdateFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_update_feature_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeature_async", "segments": [ { - "end": 57, + "end": 50, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 50, "start": 27, "type": "SHORT" }, @@ -9637,47 +14430,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 47, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 54, + "end": 51, + "start": 48, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_update_feature_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient", - "shortName": "FeaturestoreOnlineServingServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.streaming_read_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.update_feature", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.StreamingReadFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", - "shortName": "FeaturestoreOnlineServingService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "StreamingReadFeatureValues" + "shortName": "UpdateFeature" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.StreamingReadFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest" }, { - "name": "entity_type", - "type": "str" + "name": "feature", + "type": "google.cloud.aiplatform_v1beta1.types.Feature" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -9692,22 +14489,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadFeatureValuesResponse]", - "shortName": "streaming_read_feature_values" + "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", + "shortName": "update_feature" }, - "description": "Sample for StreamingReadFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py", + "description": "Sample for UpdateFeature", + "file": "aiplatform_v1beta1_generated_featurestore_service_update_feature_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_StreamingReadFeatureValues_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeature_sync", "segments": [ { - "end": 57, + "end": 50, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 50, "start": 27, "type": "SHORT" }, @@ -9717,52 +14514,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 47, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 54, + "end": 51, + "start": 48, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_streaming_read_feature_values_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_update_feature_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient", - "shortName": "FeaturestoreOnlineServingServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", + "shortName": "FeaturestoreServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceAsyncClient.write_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.update_featurestore", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.WriteFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", - "shortName": "FeaturestoreOnlineServingService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "WriteFeatureValues" + "shortName": "UpdateFeaturestore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.WriteFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest" }, { - "name": "entity_type", - "type": "str" + "name": "featurestore", + "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" }, { - "name": "payloads", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.WriteFeatureValuesPayload]" + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -9777,22 +14574,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.WriteFeatureValuesResponse", - "shortName": "write_feature_values" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_featurestore" }, - "description": "Sample for WriteFeatureValues", - "file": 
"aiplatform_v1beta1_generated_featurestore_online_serving_service_write_feature_values_async.py", + "description": "Sample for UpdateFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_WriteFeatureValues_async", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeaturestore_async", "segments": [ { - "end": 55, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 54, "start": 27, "type": "SHORT" }, @@ -9802,51 +14599,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 51, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_write_feature_values_async.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient", - "shortName": "FeaturestoreOnlineServingServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", + "shortName": "FeaturestoreServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreOnlineServingServiceClient.write_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.update_featurestore", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService.WriteFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreOnlineServingService", - "shortName": 
"FeaturestoreOnlineServingService" + "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", + "shortName": "FeaturestoreService" }, - "shortName": "WriteFeatureValues" + "shortName": "UpdateFeaturestore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.WriteFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest" }, { - "name": "entity_type", - "type": "str" + "name": "featurestore", + "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" }, { - "name": "payloads", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.WriteFeatureValuesPayload]" + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -9861,22 +14658,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.WriteFeatureValuesResponse", - "shortName": "write_feature_values" + "resultType": "google.api_core.operation.Operation", + "shortName": "update_featurestore" }, - "description": "Sample for WriteFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_online_serving_service_write_feature_values_sync.py", + "description": "Sample for UpdateFeaturestore", + "file": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreOnlineServingService_WriteFeatureValues_sync", + "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeaturestore_sync", "segments": [ { - "end": 55, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 54, "start": 27, "type": "SHORT" }, @@ -9886,52 +14683,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 51, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 55, + 
"start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_online_serving_service_write_feature_values_sync.py" + "title": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.batch_create_features", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.create_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "BatchCreateFeatures" + "shortName": "CreateIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest" }, { "name": "parent", "type": "str" }, { - "name": "requests", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]" + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" }, { "name": "retry", @@ -9947,21 +14744,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "batch_create_features" + "shortName": "create_index_endpoint" }, - "description": "Sample for BatchCreateFeatures", - "file": 
"aiplatform_v1beta1_generated_featurestore_service_batch_create_features_async.py", + "description": "Sample for CreateIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchCreateFeatures_async", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_CreateIndexEndpoint_async", "segments": [ { - "end": 60, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 59, "start": 27, "type": "SHORT" }, @@ -9971,51 +14768,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 51, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_async.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.batch_create_features", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.create_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchCreateFeatures", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": 
"IndexEndpointService" }, - "shortName": "BatchCreateFeatures" + "shortName": "CreateIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateFeaturesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest" }, { "name": "parent", "type": "str" }, { - "name": "requests", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest]" + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" }, { "name": "retry", @@ -10031,21 +14828,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "batch_create_features" + "shortName": "create_index_endpoint" }, - "description": "Sample for BatchCreateFeatures", - "file": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_sync.py", + "description": "Sample for CreateIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchCreateFeatures_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_CreateIndexEndpoint_sync", "segments": [ { - "end": 60, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 59, "start": 27, "type": "SHORT" }, @@ -10055,47 +14852,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 51, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_batch_create_features_sync.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.batch_read_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.delete_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "BatchReadFeatureValues" + "shortName": "DeleteIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest" }, { - "name": "featurestore", + "name": "name", "type": "str" }, { @@ -10112,21 +14909,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "batch_read_feature_values" + "shortName": "delete_index_endpoint" }, - "description": "Sample for BatchReadFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_async.py", + "description": "Sample for DeleteIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_async", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeleteIndexEndpoint_async", "segments": [ { - "end": 68, + "end": 55, "start": 27, 
"type": "FULL" }, { - "end": 68, + "end": 55, "start": 27, "type": "SHORT" }, @@ -10136,46 +14933,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 58, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 65, - "start": 59, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 69, - "start": 66, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_async.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.batch_read_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.delete_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.BatchReadFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "BatchReadFeatureValues" + "shortName": "DeleteIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchReadFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest" }, { - "name": "featurestore", + "name": "name", "type": "str" }, { @@ -10192,21 +14989,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "batch_read_feature_values" + "shortName": 
"delete_index_endpoint" }, - "description": "Sample for BatchReadFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_sync.py", + "description": "Sample for DeleteIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_BatchReadFeatureValues_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeleteIndexEndpoint_sync", "segments": [ { - "end": 68, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 68, + "end": 55, "start": 27, "type": "SHORT" }, @@ -10216,56 +15013,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 58, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 65, - "start": 59, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 69, - "start": 66, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_batch_read_feature_values_sync.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.create_entity_type", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.deploy_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - 
"shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "CreateEntityType" + "shortName": "DeployIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeployIndexRequest" }, { - "name": "parent", + "name": "index_endpoint", "type": "str" }, { - "name": "entity_type", - "type": "google.cloud.aiplatform_v1beta1.types.EntityType" - }, - { - "name": "entity_type_id", - "type": "str" + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" }, { "name": "retry", @@ -10281,21 +15074,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_entity_type" + "shortName": "deploy_index" }, - "description": "Sample for CreateEntityType", - "file": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_async.py", + "description": "Sample for DeployIndex", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateEntityType_async", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeployIndex_async", "segments": [ { - "end": 56, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 60, "start": 27, "type": "SHORT" }, @@ -10305,55 +15098,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_async.py" + "title": 
"aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.create_entity_type", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.deploy_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateEntityType", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "CreateEntityType" + "shortName": "DeployIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateEntityTypeRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeployIndexRequest" }, { - "name": "parent", + "name": "index_endpoint", "type": "str" }, { - "name": "entity_type", - "type": "google.cloud.aiplatform_v1beta1.types.EntityType" - }, - { - "name": "entity_type_id", - "type": "str" + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" }, { "name": "retry", @@ -10369,21 +15158,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "create_entity_type" + "shortName": "deploy_index" }, - "description": "Sample for CreateEntityType", - "file": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_sync.py", + "description": "Sample for DeployIndex", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_sync.py", "language": "PYTHON", "origin": 
"API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateEntityType_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeployIndex_sync", "segments": [ { - "end": 56, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 60, "start": 27, "type": "SHORT" }, @@ -10393,55 +15182,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_create_entity_type_sync.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.create_feature", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.get_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "CreateFeature" + "shortName": "GetIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": 
"feature", - "type": "google.cloud.aiplatform_v1beta1.types.Feature" + "type": "google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest" }, { - "name": "feature_id", + "name": "name", "type": "str" }, { @@ -10457,22 +15238,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_feature" + "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", + "shortName": "get_index_endpoint" }, - "description": "Sample for CreateFeature", - "file": "aiplatform_v1beta1_generated_featurestore_service_create_feature_async.py", + "description": "Sample for GetIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeature_async", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_GetIndexEndpoint_async", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -10482,54 +15263,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_create_feature_async.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.create_feature", 
+ "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.get_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeature", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "CreateFeature" + "shortName": "GetIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateFeatureRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "feature", - "type": "google.cloud.aiplatform_v1beta1.types.Feature" + "type": "google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest" }, { - "name": "feature_id", + "name": "name", "type": "str" }, { @@ -10545,22 +15318,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_feature" + "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", + "shortName": "get_index_endpoint" }, - "description": "Sample for CreateFeature", - "file": "aiplatform_v1beta1_generated_featurestore_service_create_feature_sync.py", + "description": "Sample for GetIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeature_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_GetIndexEndpoint_sync", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -10570,57 +15343,49 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 
53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_create_feature_sync.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.create_featurestore", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.list_index_endpoints", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "CreateFeaturestore" + "shortName": "ListIndexEndpoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "featurestore", - "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" - }, - { - "name": "featurestore_id", - "type": "str" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -10634,22 +15399,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_featurestore" + "resultType": 
"google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager", + "shortName": "list_index_endpoints" }, - "description": "Sample for CreateFeaturestore", - "file": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_async.py", + "description": "Sample for ListIndexEndpoints", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeaturestore_async", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_ListIndexEndpoints_async", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -10659,56 +15424,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_async.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.create_featurestore", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.list_index_endpoints", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.CreateFeaturestore", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints", "service": { - 
"fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "CreateFeaturestore" + "shortName": "ListIndexEndpoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateFeaturestoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "featurestore", - "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" - }, - { - "name": "featurestore_id", - "type": "str" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -10722,22 +15479,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_featurestore" + "resultType": "google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsPager", + "shortName": "list_index_endpoints" }, - "description": "Sample for CreateFeaturestore", - "file": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_sync.py", + "description": "Sample for ListIndexEndpoints", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_CreateFeaturestore_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_ListIndexEndpoints_sync", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -10747,52 +15504,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": 
"RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_create_featurestore_sync.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_entity_type", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.mutate_deployed_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteEntityType", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "DeleteEntityType" + "shortName": "MutateDeployedIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest" + "type": "google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest" }, { - "name": "name", + "name": "index_endpoint", "type": "str" }, { - "name": "force", - "type": "bool" + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" }, { "name": "retry", @@ -10808,21 +15565,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_entity_type" + "shortName": "mutate_deployed_index" }, - "description": "Sample for DeleteEntityType", - "file": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_async.py", + "description": "Sample for 
MutateDeployedIndex", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteEntityType_async", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_MutateDeployedIndex_async", "segments": [ { - "end": 55, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 60, "start": 27, "type": "SHORT" }, @@ -10832,51 +15589,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_async.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_entity_type", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.mutate_deployed_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteEntityType", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "DeleteEntityType" + "shortName": "MutateDeployedIndex" }, "parameters": [ { "name": 
"request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteEntityTypeRequest" + "type": "google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest" }, { - "name": "name", + "name": "index_endpoint", "type": "str" }, { - "name": "force", - "type": "bool" + "name": "deployed_index", + "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" }, { "name": "retry", @@ -10892,21 +15649,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_entity_type" + "shortName": "mutate_deployed_index" }, - "description": "Sample for DeleteEntityType", - "file": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_sync.py", + "description": "Sample for MutateDeployedIndex", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteEntityType_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_MutateDeployedIndex_sync", "segments": [ { - "end": 55, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 60, "start": 27, "type": "SHORT" }, @@ -10916,47 +15673,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_delete_entity_type_sync.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": 
"IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.undeploy_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "DeleteFeatureValues" + "shortName": "UndeployIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest" }, { - "name": "entity_type", + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index_id", "type": "str" }, { @@ -10973,21 +15734,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_feature_values" + "shortName": "undeploy_index" }, - "description": "Sample for DeleteFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_values_async.py", + "description": "Sample for UndeployIndex", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeatureValues_async", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UndeployIndex_async", "segments": [ { - "end": 59, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 56, "start": 27, "type": "SHORT" }, @@ -10997,46 +15758,50 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 
56, - "start": 50, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_values_async.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.undeploy_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "DeleteFeatureValues" + "shortName": "UndeployIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest" }, { - "name": "entity_type", + "name": "index_endpoint", + "type": "str" + }, + { + "name": "deployed_index_id", "type": "str" }, { @@ -11053,21 +15818,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_feature_values" + "shortName": "undeploy_index" }, - "description": "Sample for DeleteFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_values_sync.py", + "description": "Sample for UndeployIndex", + "file": 
"aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeatureValues_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UndeployIndex_sync", "segments": [ { - "end": 59, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 56, "start": 27, "type": "SHORT" }, @@ -11077,48 +15842,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_values_sync.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", + "shortName": "IndexEndpointServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_feature", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.update_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "DeleteFeature" + "shortName": "UpdateIndexEndpoint" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest" }, { - "name": "name", - "type": "str" + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -11133,22 +15902,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_feature" + "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", + "shortName": "update_index_endpoint" }, - "description": "Sample for DeleteFeature", - "file": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_async.py", + "description": "Sample for UpdateIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeature_async", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UpdateIndexEndpoint_async", "segments": [ { - "end": 55, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 54, "start": 27, "type": "SHORT" }, @@ -11158,47 +15927,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 51, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_async.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", + "shortName": "IndexEndpointServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_feature", + "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.update_index_endpoint", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeature", + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", + "shortName": "IndexEndpointService" }, - "shortName": "DeleteFeature" + "shortName": "UpdateIndexEndpoint" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeatureRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest" }, { - "name": "name", - "type": "str" + "name": "index_endpoint", + "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -11213,22 +15986,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_feature" + "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", + "shortName": "update_index_endpoint" }, - "description": "Sample for DeleteFeature", - "file": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_sync.py", + "description": "Sample for UpdateIndexEndpoint", + "file": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeature_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UpdateIndexEndpoint_sync", "segments": [ { - 
"end": 55, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 54, "start": 27, "type": "SHORT" }, @@ -11238,52 +16011,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 51, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_delete_feature_sync.py" + "title": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.delete_featurestore", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.create_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.CreateIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "DeleteFeaturestore" + "shortName": "CreateIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { - "name": "force", - "type": "bool" + "name": "index", + "type": "google.cloud.aiplatform_v1beta1.types.Index" }, { "name": "retry", @@ -11299,21 +16072,21 @@ } ], "resultType": 
"google.api_core.operation_async.AsyncOperation", - "shortName": "delete_featurestore" + "shortName": "create_index" }, - "description": "Sample for DeleteFeaturestore", - "file": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_async.py", + "description": "Sample for CreateIndex", + "file": "aiplatform_v1beta1_generated_index_service_create_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeaturestore_async", + "regionTag": "aiplatform_v1beta1_generated_IndexService_CreateIndex_async", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -11323,51 +16096,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_async.py" + "title": "aiplatform_v1beta1_generated_index_service_create_index_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.delete_featurestore", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.create_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.DeleteFeaturestore", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.CreateIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": 
"google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "DeleteFeaturestore" + "shortName": "CreateIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteFeaturestoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { - "name": "force", - "type": "bool" + "name": "index", + "type": "google.cloud.aiplatform_v1beta1.types.Index" }, { "name": "retry", @@ -11383,21 +16156,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_featurestore" + "shortName": "create_index" }, - "description": "Sample for DeleteFeaturestore", - "file": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_sync.py", + "description": "Sample for CreateIndex", + "file": "aiplatform_v1beta1_generated_index_service_create_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_DeleteFeaturestore_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexService_CreateIndex_sync", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -11407,47 +16180,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_delete_featurestore_sync.py" + "title": "aiplatform_v1beta1_generated_index_service_create_index_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.export_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.delete_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "ExportFeatureValues" + "shortName": "DeleteIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest" }, { - "name": "entity_type", + "name": "name", "type": "str" }, { @@ -11464,21 +16237,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "export_feature_values" + "shortName": "delete_index" }, - "description": "Sample for ExportFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_async.py", + "description": "Sample for DeleteIndex", + "file": "aiplatform_v1beta1_generated_index_service_delete_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ExportFeatureValues_async", + "regionTag": "aiplatform_v1beta1_generated_IndexService_DeleteIndex_async", "segments": [ { - "end": 63, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 63, + "end": 55, "start": 27, "type": "SHORT" }, @@ -11488,46 +16261,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 53, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 60, - "start": 54, + "end": 52, + "start": 46, "type": 
"REQUEST_EXECUTION" }, { - "end": 64, - "start": 61, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_async.py" + "title": "aiplatform_v1beta1_generated_index_service_delete_index_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.export_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.delete_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ExportFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "ExportFeatureValues" + "shortName": "DeleteIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ExportFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest" }, { - "name": "entity_type", + "name": "name", "type": "str" }, { @@ -11544,21 +16317,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "export_feature_values" + "shortName": "delete_index" }, - "description": "Sample for ExportFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_sync.py", + "description": "Sample for DeleteIndex", + "file": "aiplatform_v1beta1_generated_index_service_delete_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_FeaturestoreService_ExportFeatureValues_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexService_DeleteIndex_sync", "segments": [ { - "end": 63, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 63, + "end": 55, "start": 27, "type": "SHORT" }, @@ -11568,44 +16341,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 53, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 60, - "start": 54, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 64, - "start": 61, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_export_feature_values_sync.py" + "title": "aiplatform_v1beta1_generated_index_service_delete_index_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.get_entity_type", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.get_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.GetIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "GetEntityType" + "shortName": "GetIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetIndexRequest" }, { "name": "name", @@ -11624,14 +16397,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1beta1.types.EntityType", - "shortName": "get_entity_type" + "resultType": "google.cloud.aiplatform_v1beta1.types.Index", + "shortName": "get_index" }, - "description": "Sample for GetEntityType", - "file": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_async.py", + "description": "Sample for GetIndex", + "file": "aiplatform_v1beta1_generated_index_service_get_index_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetEntityType_async", + "regionTag": "aiplatform_v1beta1_generated_IndexService_GetIndex_async", "segments": [ { "end": 51, @@ -11664,28 +16437,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_async.py" + "title": "aiplatform_v1beta1_generated_index_service_get_index_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.get_entity_type", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.get_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetEntityType", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.GetIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "GetEntityType" + "shortName": "GetIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetEntityTypeRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetIndexRequest" }, { "name": "name", @@ -11704,14 +16477,14 @@ 
"type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", - "shortName": "get_entity_type" + "resultType": "google.cloud.aiplatform_v1beta1.types.Index", + "shortName": "get_index" }, - "description": "Sample for GetEntityType", - "file": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_sync.py", + "description": "Sample for GetIndex", + "file": "aiplatform_v1beta1_generated_index_service_get_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetEntityType_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexService_GetIndex_sync", "segments": [ { "end": 51, @@ -11744,32 +16517,32 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_get_entity_type_sync.py" + "title": "aiplatform_v1beta1_generated_index_service_get_index_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.get_feature", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.list_indexes", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.ListIndexes", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "GetFeature" + "shortName": "ListIndexes" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.ListIndexesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -11785,22 +16558,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", - "shortName": "get_feature" + "resultType": "google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesAsyncPager", + "shortName": "list_indexes" }, - "description": "Sample for GetFeature", - "file": "aiplatform_v1beta1_generated_featurestore_service_get_feature_async.py", + "description": "Sample for ListIndexes", + "file": "aiplatform_v1beta1_generated_index_service_list_indexes_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeature_async", + "regionTag": "aiplatform_v1beta1_generated_IndexService_ListIndexes_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -11820,36 +16593,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_get_feature_async.py" + "title": "aiplatform_v1beta1_generated_index_service_list_indexes_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.get_feature", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.list_indexes", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeature", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.ListIndexes", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - 
"shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "GetFeature" + "shortName": "ListIndexes" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeatureRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListIndexesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -11865,22 +16638,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", - "shortName": "get_feature" + "resultType": "google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesPager", + "shortName": "list_indexes" }, - "description": "Sample for GetFeature", - "file": "aiplatform_v1beta1_generated_featurestore_service_get_feature_sync.py", + "description": "Sample for ListIndexes", + "file": "aiplatform_v1beta1_generated_index_service_list_indexes_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeature_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexService_ListIndexes_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -11900,38 +16673,34 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_get_feature_sync.py" + "title": "aiplatform_v1beta1_generated_index_service_list_indexes_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.get_featurestore", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.remove_datapoints", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.RemoveDatapoints", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "GetFeaturestore" + "shortName": "RemoveDatapoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.RemoveDatapointsRequest" }, { "name": "retry", @@ -11946,14 +16715,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Featurestore", - "shortName": "get_featurestore" + "resultType": "google.cloud.aiplatform_v1beta1.types.RemoveDatapointsResponse", + "shortName": "remove_datapoints" }, - "description": "Sample for GetFeaturestore", - "file": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_async.py", + "description": "Sample for RemoveDatapoints", + "file": "aiplatform_v1beta1_generated_index_service_remove_datapoints_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeaturestore_async", + "regionTag": "aiplatform_v1beta1_generated_IndexService_RemoveDatapoints_async", "segments": [ { "end": 51, @@ -11986,32 +16755,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_async.py" + "title": "aiplatform_v1beta1_generated_index_service_remove_datapoints_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.get_featurestore", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.remove_datapoints", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.GetFeaturestore", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.RemoveDatapoints", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "GetFeaturestore" + "shortName": "RemoveDatapoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetFeaturestoreRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.RemoveDatapointsRequest" }, { "name": "retry", @@ -12026,14 +16791,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Featurestore", - "shortName": "get_featurestore" + "resultType": "google.cloud.aiplatform_v1beta1.types.RemoveDatapointsResponse", + "shortName": "remove_datapoints" }, - "description": "Sample for GetFeaturestore", - "file": "aiplatform_v1beta1_generated_featurestore_service_get_featurestore_sync.py", + "description": "Sample for RemoveDatapoints", + "file": "aiplatform_v1beta1_generated_index_service_remove_datapoints_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_GetFeaturestore_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexService_RemoveDatapoints_sync", "segments": [ { "end": 51, @@ -12066,33 +16831,37 @@ "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_featurestore_service_get_featurestore_sync.py" + "title": "aiplatform_v1beta1_generated_index_service_remove_datapoints_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.import_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.update_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "ImportFeatureValues" + "shortName": "UpdateIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest" }, { - "name": "entity_type", - "type": "str" + "name": "index", + "type": "google.cloud.aiplatform_v1beta1.types.Index" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -12108,21 +16877,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "import_feature_values" + "shortName": "update_index" }, - "description": "Sample for ImportFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_async.py", + "description": "Sample for UpdateIndex", + "file": "aiplatform_v1beta1_generated_index_service_update_index_async.py", "language": "PYTHON", "origin": 
"API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ImportFeatureValues_async", + "regionTag": "aiplatform_v1beta1_generated_IndexService_UpdateIndex_async", "segments": [ { - "end": 64, + "end": 58, "start": 27, "type": "FULL" }, { - "end": 64, + "end": 58, "start": 27, "type": "SHORT" }, @@ -12132,47 +16901,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 61, - "start": 55, + "end": 55, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 65, - "start": 62, + "end": 59, + "start": 56, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_async.py" + "title": "aiplatform_v1beta1_generated_index_service_update_index_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.import_feature_values", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.update_index", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ImportFeatureValues", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "ImportFeatureValues" + "shortName": "UpdateIndex" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ImportFeatureValuesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest" }, { - "name": "entity_type", - "type": "str" + "name": "index", + "type": 
"google.cloud.aiplatform_v1beta1.types.Index" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -12188,21 +16961,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "import_feature_values" + "shortName": "update_index" }, - "description": "Sample for ImportFeatureValues", - "file": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_sync.py", + "description": "Sample for UpdateIndex", + "file": "aiplatform_v1beta1_generated_index_service_update_index_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ImportFeatureValues_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexService_UpdateIndex_sync", "segments": [ { - "end": 64, + "end": 58, "start": 27, "type": "FULL" }, { - "end": 64, + "end": 58, "start": 27, "type": "SHORT" }, @@ -12212,48 +16985,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 61, - "start": 55, + "end": 55, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 65, - "start": 62, + "end": 59, + "start": 56, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_import_feature_values_sync.py" + "title": "aiplatform_v1beta1_generated_index_service_update_index_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", + "shortName": "IndexServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.list_entity_types", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.upsert_datapoints", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.UpsertDatapoints", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "ListEntityTypes" + "shortName": "UpsertDatapoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest" - }, - { - "name": "parent", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.UpsertDatapointsRequest" }, { "name": "retry", @@ -12268,22 +17037,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesAsyncPager", - "shortName": "list_entity_types" + "resultType": "google.cloud.aiplatform_v1beta1.types.UpsertDatapointsResponse", + "shortName": "upsert_datapoints" }, - "description": "Sample for ListEntityTypes", - "file": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_async.py", + "description": "Sample for UpsertDatapoints", + "file": "aiplatform_v1beta1_generated_index_service_upsert_datapoints_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListEntityTypes_async", + "regionTag": "aiplatform_v1beta1_generated_IndexService_UpsertDatapoints_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -12303,37 +17072,33 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_async.py" + "title": "aiplatform_v1beta1_generated_index_service_upsert_datapoints_async.py" }, { "canonical": true, "clientMethod": { "client": 
{ - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", + "shortName": "IndexServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.list_entity_types", + "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.upsert_datapoints", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListEntityTypes", + "fullName": "google.cloud.aiplatform.v1beta1.IndexService.UpsertDatapoints", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.IndexService", + "shortName": "IndexService" }, - "shortName": "ListEntityTypes" + "shortName": "UpsertDatapoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListEntityTypesRequest" - }, - { - "name": "parent", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.UpsertDatapointsRequest" }, { "name": "retry", @@ -12348,22 +17113,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListEntityTypesPager", - "shortName": "list_entity_types" + "resultType": "google.cloud.aiplatform_v1beta1.types.UpsertDatapointsResponse", + "shortName": "upsert_datapoints" }, - "description": "Sample for ListEntityTypes", - "file": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_sync.py", + "description": "Sample for UpsertDatapoints", + "file": "aiplatform_v1beta1_generated_index_service_upsert_datapoints_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListEntityTypes_sync", + "regionTag": "aiplatform_v1beta1_generated_IndexService_UpsertDatapoints_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": 
"FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -12383,37 +17148,37 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_list_entity_types_sync.py" + "title": "aiplatform_v1beta1_generated_index_service_upsert_datapoints_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.list_features", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "ListFeatures" + "shortName": "CancelBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -12429,22 +17194,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesAsyncPager", - "shortName": "list_features" + "shortName": "cancel_batch_prediction_job" }, - "description": "Sample for ListFeatures", - "file": "aiplatform_v1beta1_generated_featurestore_service_list_features_async.py", + "description": 
"Sample for CancelBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelBatchPredictionJob_async", "segments": [ { - "end": 52, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 49, "start": 27, "type": "SHORT" }, @@ -12459,41 +17223,39 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_list_features_async.py" + "title": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.list_features", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeatures", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "ListFeatures" + "shortName": "CancelBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest" }, { - "name": "parent", + 
"name": "name", "type": "str" }, { @@ -12509,22 +17271,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturesPager", - "shortName": "list_features" + "shortName": "cancel_batch_prediction_job" }, - "description": "Sample for ListFeatures", - "file": "aiplatform_v1beta1_generated_featurestore_service_list_features_sync.py", + "description": "Sample for CancelBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeatures_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelBatchPredictionJob_sync", "segments": [ { - "end": 52, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 49, "start": 27, "type": "SHORT" }, @@ -12539,42 +17300,40 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_list_features_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.list_featurestores", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob", "service": { - "fullName": 
"google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "ListFeaturestores" + "shortName": "CancelCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -12590,22 +17349,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresAsyncPager", - "shortName": "list_featurestores" + "shortName": "cancel_custom_job" }, - "description": "Sample for ListFeaturestores", - "file": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_async.py", + "description": "Sample for CancelCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeaturestores_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelCustomJob_async", "segments": [ { - "end": 52, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 49, "start": 27, "type": "SHORT" }, @@ -12620,41 +17378,39 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_async.py" + "title": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + 
"shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.list_featurestores", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.ListFeaturestores", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "ListFeaturestores" + "shortName": "CancelCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListFeaturestoresRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -12670,22 +17426,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.ListFeaturestoresPager", - "shortName": "list_featurestores" + "shortName": "cancel_custom_job" }, - "description": "Sample for ListFeaturestores", - "file": "aiplatform_v1beta1_generated_featurestore_service_list_featurestores_sync.py", + "description": "Sample for CancelCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_ListFeaturestores_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelCustomJob_sync", "segments": [ { - "end": 52, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 49, "start": 27, "type": "SHORT" }, @@ -12700,46 +17455,40 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_featurestore_service_list_featurestores_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.search_features", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "SearchFeatures" + "shortName": "CancelDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest" - }, - { - "name": "location", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest" }, { - "name": "query", + "name": "name", "type": "str" }, { @@ -12755,22 +17504,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesAsyncPager", - "shortName": "search_features" + "shortName": "cancel_data_labeling_job" }, - "description": "Sample for SearchFeatures", - "file": "aiplatform_v1beta1_generated_featurestore_service_search_features_async.py", + "description": "Sample for CancelDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_async.py", "language": "PYTHON", "origin": 
"API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_SearchFeatures_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelDataLabelingJob_async", "segments": [ { - "end": 52, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 49, "start": 27, "type": "SHORT" }, @@ -12785,45 +17533,39 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_search_features_async.py" + "title": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.search_features", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.SearchFeatures", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "SearchFeatures" + "shortName": "CancelDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.SearchFeaturesRequest" - }, - { - "name": "location", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest" }, { - "name": "query", + "name": "name", "type": "str" }, { @@ -12839,22 +17581,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1beta1.services.featurestore_service.pagers.SearchFeaturesPager", - "shortName": "search_features" + "shortName": "cancel_data_labeling_job" }, - "description": "Sample for SearchFeatures", - "file": "aiplatform_v1beta1_generated_featurestore_service_search_features_sync.py", + "description": "Sample for CancelDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_SearchFeatures_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelDataLabelingJob_sync", "segments": [ { - "end": 52, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 49, "start": 27, "type": "SHORT" }, @@ -12869,47 +17610,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_search_features_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.update_entity_type", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + 
"fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UpdateEntityType" + "shortName": "CancelHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest" - }, - { - "name": "entity_type", - "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + "type": "google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -12924,22 +17659,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", - "shortName": "update_entity_type" + "shortName": "cancel_hyperparameter_tuning_job" }, - "description": "Sample for UpdateEntityType", - "file": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_async.py", + "description": "Sample for CancelHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateEntityType_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelHyperparameterTuningJob_async", "segments": [ { - "end": 50, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 49, "start": 27, "type": "SHORT" }, @@ -12949,51 +17683,45 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_async.py" + "title": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_async.py" }, { "canonical": true, 
"clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.update_entity_type", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateEntityType", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UpdateEntityType" + "shortName": "CancelHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateEntityTypeRequest" - }, - { - "name": "entity_type", - "type": "google.cloud.aiplatform_v1beta1.types.EntityType" + "type": "google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -13008,22 +17736,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.EntityType", - "shortName": "update_entity_type" + "shortName": "cancel_hyperparameter_tuning_job" }, - "description": "Sample for UpdateEntityType", - "file": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_sync.py", + "description": "Sample for CancelHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_FeaturestoreService_UpdateEntityType_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelHyperparameterTuningJob_sync", "segments": [ { - "end": 50, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 49, "start": 27, "type": "SHORT" }, @@ -13033,52 +17760,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_update_entity_type_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.update_feature", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelNasJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UpdateFeature" + "shortName": "CancelNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest" - }, - { - "name": "feature", - "type": "google.cloud.aiplatform_v1beta1.types.Feature" + "type": "google.cloud.aiplatform_v1beta1.types.CancelNasJobRequest" }, { - "name": 
"update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -13093,22 +17814,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", - "shortName": "update_feature" + "shortName": "cancel_nas_job" }, - "description": "Sample for UpdateFeature", - "file": "aiplatform_v1beta1_generated_featurestore_service_update_feature_async.py", + "description": "Sample for CancelNasJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_nas_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeature_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelNasJob_async", "segments": [ { - "end": 50, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 49, "start": 27, "type": "SHORT" }, @@ -13118,51 +17838,45 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_update_feature_async.py" + "title": "aiplatform_v1beta1_generated_job_service_cancel_nas_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.update_feature", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeature", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelNasJob", 
"service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UpdateFeature" + "shortName": "CancelNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeatureRequest" - }, - { - "name": "feature", - "type": "google.cloud.aiplatform_v1beta1.types.Feature" + "type": "google.cloud.aiplatform_v1beta1.types.CancelNasJobRequest" }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -13177,22 +17891,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Feature", - "shortName": "update_feature" + "shortName": "cancel_nas_job" }, - "description": "Sample for UpdateFeature", - "file": "aiplatform_v1beta1_generated_featurestore_service_update_feature_sync.py", + "description": "Sample for CancelNasJob", + "file": "aiplatform_v1beta1_generated_job_service_cancel_nas_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeature_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_CancelNasJob_sync", "segments": [ { - "end": 50, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 49, "start": 27, "type": "SHORT" }, @@ -13202,52 +17915,50 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_update_feature_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_cancel_nas_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": 
{ - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient", - "shortName": "FeaturestoreServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceAsyncClient.update_featurestore", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UpdateFeaturestore" + "shortName": "CreateBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest" }, { - "name": "featurestore", - "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "batch_prediction_job", + "type": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob" }, { "name": "retry", @@ -13262,22 +17973,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_featurestore" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", + "shortName": "create_batch_prediction_job" }, - "description": "Sample for UpdateFeaturestore", - "file": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_async.py", + "description": "Sample for CreateBatchPredictionJob", + "file": 
"aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeaturestore_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateBatchPredictionJob_async", "segments": [ { - "end": 54, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 59, "start": 27, "type": "SHORT" }, @@ -13287,51 +17998,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 53, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 45, + "end": 56, + "start": 54, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_async.py" + "title": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient", - "shortName": "FeaturestoreServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.FeaturestoreServiceClient.update_featurestore", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService.UpdateFeaturestore", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.FeaturestoreService", - "shortName": "FeaturestoreService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UpdateFeaturestore" + "shortName": "CreateBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateFeaturestoreRequest" 
+ "type": "google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest" }, { - "name": "featurestore", - "type": "google.cloud.aiplatform_v1beta1.types.Featurestore" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "batch_prediction_job", + "type": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob" }, { "name": "retry", @@ -13346,22 +18057,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_featurestore" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", + "shortName": "create_batch_prediction_job" }, - "description": "Sample for UpdateFeaturestore", - "file": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_sync.py", + "description": "Sample for CreateBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_FeaturestoreService_UpdateFeaturestore_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateBatchPredictionJob_sync", "segments": [ { - "end": 54, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 59, "start": 27, "type": "SHORT" }, @@ -13371,52 +18082,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 53, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 45, + "end": 56, + "start": 54, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_featurestore_service_update_featurestore_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", 
- "shortName": "IndexEndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.create_index_endpoint", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "CreateIndexEndpoint" + "shortName": "CreateCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest" }, { "name": "parent", "type": "str" }, { - "name": "index_endpoint", - "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + "name": "custom_job", + "type": "google.cloud.aiplatform_v1beta1.types.CustomJob" }, { "name": "retry", @@ -13431,22 +18142,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_index_endpoint" + "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", + "shortName": "create_custom_job" }, - "description": "Sample for CreateIndexEndpoint", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_async.py", + "description": "Sample for CreateCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_create_custom_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_CreateIndexEndpoint_async", + "regionTag": 
"aiplatform_v1beta1_generated_JobService_CreateCustomJob_async", "segments": [ { - "end": 59, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 56, "start": 27, "type": "SHORT" }, @@ -13456,51 +18167,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_async.py" + "title": "aiplatform_v1beta1_generated_job_service_create_custom_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", - "shortName": "IndexEndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.create_index_endpoint", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "CreateIndexEndpoint" + "shortName": "CreateCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest" }, { "name": "parent", "type": "str" }, { - "name": "index_endpoint", - "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + "name": "custom_job", + "type": 
"google.cloud.aiplatform_v1beta1.types.CustomJob" }, { "name": "retry", @@ -13515,22 +18226,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_index_endpoint" + "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", + "shortName": "create_custom_job" }, - "description": "Sample for CreateIndexEndpoint", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_sync.py", + "description": "Sample for CreateCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_create_custom_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_CreateIndexEndpoint_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateCustomJob_sync", "segments": [ { - "end": 59, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 56, "start": 27, "type": "SHORT" }, @@ -13540,49 +18251,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_create_index_endpoint_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_create_custom_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", - "shortName": "IndexEndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.delete_index_endpoint", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_data_labeling_job", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "DeleteIndexEndpoint" + "shortName": "CreateDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "data_labeling_job", + "type": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -13596,22 +18311,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_index_endpoint" + "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", + "shortName": "create_data_labeling_job" }, - "description": "Sample for DeleteIndexEndpoint", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_async.py", + "description": "Sample for CreateDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeleteIndexEndpoint_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateDataLabelingJob_async", "segments": [ { - "end": 55, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 60, "start": 27, "type": "SHORT" }, @@ -13621,48 +18336,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 57, + "start": 
55, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_async.py" + "title": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", - "shortName": "IndexEndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.delete_index_endpoint", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "DeleteIndexEndpoint" + "shortName": "CreateDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "data_labeling_job", + "type": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -13676,22 +18395,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_index_endpoint" + "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", + "shortName": "create_data_labeling_job" }, - "description": "Sample for DeleteIndexEndpoint", - 
"file": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_sync.py", + "description": "Sample for CreateDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeleteIndexEndpoint_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateDataLabelingJob_sync", "segments": [ { - "end": 55, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 60, "start": 27, "type": "SHORT" }, @@ -13701,52 +18420,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 57, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_delete_index_endpoint_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", - "shortName": "IndexEndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.deploy_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, 
- "shortName": "DeployIndex" + "shortName": "CreateHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeployIndexRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest" }, { - "name": "index_endpoint", + "name": "parent", "type": "str" }, { - "name": "deployed_index", - "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" + "name": "hyperparameter_tuning_job", + "type": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob" }, { "name": "retry", @@ -13761,22 +18480,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "deploy_index" + "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", + "shortName": "create_hyperparameter_tuning_job" }, - "description": "Sample for DeployIndex", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_async.py", + "description": "Sample for CreateHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeployIndex_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateHyperparameterTuningJob_async", "segments": [ { - "end": 60, + "end": 63, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 63, "start": 27, "type": "SHORT" }, @@ -13786,51 +18505,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 57, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 51, + "end": 60, + "start": 58, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 64, + "start": 61, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_async.py" + "title": 
"aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", - "shortName": "IndexEndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.deploy_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "DeployIndex" + "shortName": "CreateHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeployIndexRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest" }, { - "name": "index_endpoint", + "name": "parent", "type": "str" }, { - "name": "deployed_index", - "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" + "name": "hyperparameter_tuning_job", + "type": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob" }, { "name": "retry", @@ -13845,22 +18564,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "deploy_index" + "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", + "shortName": "create_hyperparameter_tuning_job" }, - "description": "Sample for DeployIndex", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_sync.py", + "description": "Sample for CreateHyperparameterTuningJob", + 
"file": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_DeployIndex_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateHyperparameterTuningJob_sync", "segments": [ { - "end": 60, + "end": 63, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 63, "start": 27, "type": "SHORT" }, @@ -13870,49 +18589,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 57, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 51, + "end": 60, + "start": 58, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 64, + "start": 61, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_deploy_index_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", - "shortName": "IndexEndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.get_index_endpoint", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "GetIndexEndpoint" + "shortName": "CreateModelDeploymentMonitoringJob" }, "parameters": [ { "name": 
"request", - "type": "google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -13926,22 +18649,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", - "shortName": "get_index_endpoint" + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", + "shortName": "create_model_deployment_monitoring_job" }, - "description": "Sample for GetIndexEndpoint", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_async.py", + "description": "Sample for CreateModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_GetIndexEndpoint_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateModelDeploymentMonitoringJob_async", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -13951,48 +18674,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_async.py" + "title": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - 
"fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", - "shortName": "IndexEndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.get_index_endpoint", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "GetIndexEndpoint" + "shortName": "CreateModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetIndexEndpointRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14006,22 +18733,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", - "shortName": "get_index_endpoint" + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", + "shortName": "create_model_deployment_monitoring_job" }, - "description": "Sample for GetIndexEndpoint", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_sync.py", + "description": "Sample for CreateModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_sync.py", 
"language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_GetIndexEndpoint_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -14031,49 +18758,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_get_index_endpoint_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", - "shortName": "IndexEndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.list_index_endpoints", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateNasJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "ListIndexEndpoints" + "shortName": "CreateNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.CreateNasJobRequest" }, { "name": "parent", "type": "str" }, + { + "name": "nas_job", + "type": "google.cloud.aiplatform_v1beta1.types.NasJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14087,22 +18818,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsAsyncPager", - "shortName": "list_index_endpoints" + "resultType": "google.cloud.aiplatform_v1beta1.types.NasJob", + "shortName": "create_nas_job" }, - "description": "Sample for ListIndexEndpoints", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_async.py", + "description": "Sample for CreateNasJob", + "file": "aiplatform_v1beta1_generated_job_service_create_nas_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_ListIndexEndpoints_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateNasJob_async", "segments": [ { - "end": 52, + "end": 58, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 58, "start": 27, "type": "SHORT" }, @@ -14112,48 +18843,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 52, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 55, + "start": 53, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 59, + "start": 56, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_async.py" + "title": "aiplatform_v1beta1_generated_job_service_create_nas_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", - "shortName": "IndexEndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.list_index_endpoints", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateNasJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "ListIndexEndpoints" + "shortName": "CreateNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListIndexEndpointsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateNasJobRequest" }, { "name": "parent", "type": "str" }, + { + "name": "nas_job", + "type": "google.cloud.aiplatform_v1beta1.types.NasJob" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14167,22 +18902,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.index_endpoint_service.pagers.ListIndexEndpointsPager", - "shortName": "list_index_endpoints" + "resultType": "google.cloud.aiplatform_v1beta1.types.NasJob", + "shortName": "create_nas_job" }, - "description": "Sample for ListIndexEndpoints", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_sync.py", + "description": "Sample for CreateNasJob", + "file": "aiplatform_v1beta1_generated_job_service_create_nas_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_ListIndexEndpoints_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_CreateNasJob_sync", "segments": [ { - "end": 52, + "end": 58, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 58, "start": 27, "type": "SHORT" }, @@ -14192,53 +18927,49 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + 
"end": 52, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 55, + "start": 53, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 59, + "start": 56, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_list_index_endpoints_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_create_nas_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", - "shortName": "IndexEndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.mutate_deployed_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "MutateDeployedIndex" + "shortName": "DeleteBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest" }, { - "name": "index_endpoint", + "name": "name", "type": "str" }, - { - "name": "deployed_index", - "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14253,21 +18984,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "mutate_deployed_index" + "shortName": 
"delete_batch_prediction_job" }, - "description": "Sample for MutateDeployedIndex", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_async.py", + "description": "Sample for DeleteBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_MutateDeployedIndex_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteBatchPredictionJob_async", "segments": [ { - "end": 60, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 55, "start": 27, "type": "SHORT" }, @@ -14277,52 +19008,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 51, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_async.py" + "title": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", - "shortName": "IndexEndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.mutate_deployed_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.MutateDeployedIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": 
"google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "MutateDeployedIndex" + "shortName": "DeleteBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.MutateDeployedIndexRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest" }, { - "name": "index_endpoint", + "name": "name", "type": "str" }, - { - "name": "deployed_index", - "type": "google.cloud.aiplatform_v1beta1.types.DeployedIndex" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14337,21 +19064,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "mutate_deployed_index" + "shortName": "delete_batch_prediction_job" }, - "description": "Sample for MutateDeployedIndex", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_sync.py", + "description": "Sample for DeleteBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_MutateDeployedIndex_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteBatchPredictionJob_sync", "segments": [ { - "end": 60, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 55, "start": 27, "type": "SHORT" }, @@ -14361,51 +19088,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 51, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_mutate_deployed_index_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", - "shortName": "IndexEndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.undeploy_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UndeployIndex" + "shortName": "DeleteCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest" - }, - { - "name": "index_endpoint", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest" }, { - "name": "deployed_index_id", + "name": "name", "type": "str" }, { @@ -14422,21 +19145,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "undeploy_index" + "shortName": "delete_custom_job" }, - "description": "Sample for UndeployIndex", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_async.py", + "description": "Sample for DeleteCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_custom_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UndeployIndex_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteCustomJob_async", "segments": [ { - "end": 56, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 55, "start": 27, "type": "SHORT" }, @@ -14446,50 +19169,46 @@ "type": 
"CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_async.py" + "title": "aiplatform_v1beta1_generated_job_service_delete_custom_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", - "shortName": "IndexEndpointServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.undeploy_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UndeployIndex" + "shortName": "DeleteCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UndeployIndexRequest" - }, - { - "name": "index_endpoint", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest" }, { - "name": "deployed_index_id", + "name": "name", "type": "str" }, { @@ -14506,21 +19225,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "undeploy_index" + "shortName": "delete_custom_job" }, - "description": "Sample for UndeployIndex", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_sync.py", + "description": "Sample for DeleteCustomJob", + 
"file": "aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UndeployIndex_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteCustomJob_sync", "segments": [ { - "end": 56, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 55, "start": 27, "type": "SHORT" }, @@ -14530,52 +19249,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_undeploy_index_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient", - "shortName": "IndexEndpointServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceAsyncClient.update_index_endpoint", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UpdateIndexEndpoint" + "shortName": "DeleteDataLabelingJob" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest" - }, - { - "name": "index_endpoint", - "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -14590,22 +19305,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", - "shortName": "update_index_endpoint" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_data_labeling_job" }, - "description": "Sample for UpdateIndexEndpoint", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_async.py", + "description": "Sample for DeleteDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UpdateIndexEndpoint_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteDataLabelingJob_async", "segments": [ { - "end": 54, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 55, "start": 27, "type": "SHORT" }, @@ -14615,51 +19330,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 49, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_async.py" + "title": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient", - "shortName": "IndexEndpointServiceClient" 
+ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexEndpointServiceClient.update_index_endpoint", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexEndpointService", - "shortName": "IndexEndpointService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UpdateIndexEndpoint" + "shortName": "DeleteDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexEndpointRequest" - }, - { - "name": "index_endpoint", - "type": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -14674,22 +19385,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.IndexEndpoint", - "shortName": "update_index_endpoint" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_data_labeling_job" }, - "description": "Sample for UpdateIndexEndpoint", - "file": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_sync.py", + "description": "Sample for DeleteDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexEndpointService_UpdateIndexEndpoint_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteDataLabelingJob_sync", 
"segments": [ { - "end": 54, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 55, "start": 27, "type": "SHORT" }, @@ -14699,53 +19410,49 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 49, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_endpoint_service_update_index_endpoint_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", - "shortName": "IndexServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.create_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.CreateIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "CreateIndex" + "shortName": "DeleteHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "index", - "type": "google.cloud.aiplatform_v1beta1.types.Index" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14760,21 +19467,21 @@ } ], 
"resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_index" + "shortName": "delete_hyperparameter_tuning_job" }, - "description": "Sample for CreateIndex", - "file": "aiplatform_v1beta1_generated_index_service_create_index_async.py", + "description": "Sample for DeleteHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_CreateIndex_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_async", "segments": [ { - "end": 59, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 55, "start": 27, "type": "SHORT" }, @@ -14784,52 +19491,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_create_index_async.py" + "title": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", - "shortName": "IndexServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.create_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.CreateIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + 
"fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "CreateIndex" + "shortName": "DeleteHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateIndexRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "index", - "type": "google.cloud.aiplatform_v1beta1.types.Index" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -14844,21 +19547,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "create_index" + "shortName": "delete_hyperparameter_tuning_job" }, - "description": "Sample for CreateIndex", - "file": "aiplatform_v1beta1_generated_index_service_create_index_sync.py", + "description": "Sample for DeleteHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_CreateIndex_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_sync", "segments": [ { - "end": 59, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 55, "start": 27, "type": "SHORT" }, @@ -14868,44 +19571,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_create_index_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", - 
"shortName": "IndexServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.delete_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "DeleteIndex" + "shortName": "DeleteModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -14925,13 +19628,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_index" + "shortName": "delete_model_deployment_monitoring_job" }, - "description": "Sample for DeleteIndex", - "file": "aiplatform_v1beta1_generated_index_service_delete_index_async.py", + "description": "Sample for DeleteModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_DeleteIndex_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_async", "segments": [ { "end": 55, @@ -14964,28 +19667,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_delete_index_async.py" + "title": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_async.py" }, { 
"canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", - "shortName": "IndexServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.delete_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "DeleteIndex" + "shortName": "DeleteModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -15005,13 +19708,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_index" + "shortName": "delete_model_deployment_monitoring_job" }, - "description": "Sample for DeleteIndex", - "file": "aiplatform_v1beta1_generated_index_service_delete_index_sync.py", + "description": "Sample for DeleteModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_DeleteIndex_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync", "segments": [ { "end": 55, @@ -15044,29 +19747,29 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_delete_index_sync.py" + "title": 
"aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", - "shortName": "IndexServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.get_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.GetIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteNasJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "GetIndex" + "shortName": "DeleteNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetIndexRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteNasJobRequest" }, { "name": "name", @@ -15085,22 +19788,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Index", - "shortName": "get_index" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_nas_job" }, - "description": "Sample for GetIndex", - "file": "aiplatform_v1beta1_generated_index_service_get_index_async.py", + "description": "Sample for DeleteNasJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_nas_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_GetIndex_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteNasJob_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ 
-15115,38 +19818,38 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_get_index_async.py" + "title": "aiplatform_v1beta1_generated_job_service_delete_nas_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", - "shortName": "IndexServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.get_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.GetIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteNasJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "GetIndex" + "shortName": "DeleteNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetIndexRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteNasJobRequest" }, { "name": "name", @@ -15165,22 +19868,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Index", - "shortName": "get_index" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_nas_job" }, - "description": "Sample for GetIndex", - "file": "aiplatform_v1beta1_generated_index_service_get_index_sync.py", + "description": "Sample for DeleteNasJob", + "file": "aiplatform_v1beta1_generated_job_service_delete_nas_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_IndexService_GetIndex_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteNasJob_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -15195,42 +19898,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_get_index_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_delete_nas_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", - "shortName": "IndexServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.list_indexes", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.ListIndexes", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "ListIndexes" + "shortName": "GetBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListIndexesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -15246,22 +19949,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesAsyncPager", - "shortName": 
"list_indexes" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" }, - "description": "Sample for ListIndexes", - "file": "aiplatform_v1beta1_generated_index_service_list_indexes_async.py", + "description": "Sample for GetBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_ListIndexes_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetBatchPredictionJob_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -15281,36 +19984,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_list_indexes_async.py" + "title": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", - "shortName": "IndexServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.list_indexes", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_batch_prediction_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.ListIndexes", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "ListIndexes" + "shortName": "GetBatchPredictionJob" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.ListIndexesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -15326,22 +20029,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesPager", - "shortName": "list_indexes" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", + "shortName": "get_batch_prediction_job" }, - "description": "Sample for ListIndexes", - "file": "aiplatform_v1beta1_generated_index_service_list_indexes_sync.py", + "description": "Sample for GetBatchPredictionJob", + "file": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_ListIndexes_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetBatchPredictionJob_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -15361,34 +20064,38 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_list_indexes_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", - "shortName": "IndexServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.remove_datapoints", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_custom_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.RemoveDatapoints", + "fullName": 
"google.cloud.aiplatform.v1beta1.JobService.GetCustomJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "RemoveDatapoints" + "shortName": "GetCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.RemoveDatapointsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -15403,14 +20110,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.RemoveDatapointsResponse", - "shortName": "remove_datapoints" + "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", + "shortName": "get_custom_job" }, - "description": "Sample for RemoveDatapoints", - "file": "aiplatform_v1beta1_generated_index_service_remove_datapoints_async.py", + "description": "Sample for GetCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_get_custom_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_RemoveDatapoints_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetCustomJob_async", "segments": [ { "end": 51, @@ -15443,28 +20150,32 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_remove_datapoints_async.py" + "title": "aiplatform_v1beta1_generated_job_service_get_custom_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", - "shortName": "IndexServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.remove_datapoints", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_custom_job", 
"method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.RemoveDatapoints", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetCustomJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "RemoveDatapoints" + "shortName": "GetCustomJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.RemoveDatapointsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -15479,14 +20190,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.RemoveDatapointsResponse", - "shortName": "remove_datapoints" + "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", + "shortName": "get_custom_job" }, - "description": "Sample for RemoveDatapoints", - "file": "aiplatform_v1beta1_generated_index_service_remove_datapoints_sync.py", + "description": "Sample for GetCustomJob", + "file": "aiplatform_v1beta1_generated_job_service_get_custom_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_RemoveDatapoints_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetCustomJob_sync", "segments": [ { "end": 51, @@ -15519,37 +20230,33 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_remove_datapoints_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_get_custom_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", - "shortName": "IndexServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.update_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UpdateIndex" + "shortName": "GetDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest" - }, - { - "name": "index", - "type": "google.cloud.aiplatform_v1beta1.types.Index" + "type": "google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -15564,22 +20271,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_index" + "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" }, - "description": "Sample for UpdateIndex", - "file": "aiplatform_v1beta1_generated_index_service_update_index_async.py", + "description": "Sample for GetDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_UpdateIndex_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetDataLabelingJob_async", "segments": [ { - "end": 58, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 51, "start": 27, "type": "SHORT" }, @@ -15589,51 +20296,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": 
"REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 49, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_update_index_async.py" + "title": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", - "shortName": "IndexServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.update_index", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_data_labeling_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UpdateIndex" + "shortName": "GetDataLabelingJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest" - }, - { - "name": "index", - "type": "google.cloud.aiplatform_v1beta1.types.Index" + "type": "google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -15648,22 +20351,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_index" + "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", + "shortName": "get_data_labeling_job" }, - "description": "Sample for UpdateIndex", - "file": 
"aiplatform_v1beta1_generated_index_service_update_index_sync.py", + "description": "Sample for GetDataLabelingJob", + "file": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_UpdateIndex_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetDataLabelingJob_sync", "segments": [ { - "end": 58, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 51, "start": 27, "type": "SHORT" }, @@ -15673,44 +20376,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 49, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_update_index_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient", - "shortName": "IndexServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", + "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceAsyncClient.upsert_datapoints", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.UpsertDatapoints", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UpsertDatapoints" + "shortName": "GetHyperparameterTuningJob" }, "parameters": [ { "name": 
"request", - "type": "google.cloud.aiplatform_v1beta1.types.UpsertDatapointsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -15725,14 +20432,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.UpsertDatapointsResponse", - "shortName": "upsert_datapoints" + "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" }, - "description": "Sample for UpsertDatapoints", - "file": "aiplatform_v1beta1_generated_index_service_upsert_datapoints_async.py", + "description": "Sample for GetHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_UpsertDatapoints_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_async", "segments": [ { "end": 51, @@ -15765,28 +20472,32 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_upsert_datapoints_async.py" + "title": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient", - "shortName": "IndexServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", + "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.IndexServiceClient.upsert_datapoints", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_hyperparameter_tuning_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.IndexService.UpsertDatapoints", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob", "service": { - "fullName": 
"google.cloud.aiplatform.v1beta1.IndexService", - "shortName": "IndexService" + "fullName": "google.cloud.aiplatform.v1beta1.JobService", + "shortName": "JobService" }, - "shortName": "UpsertDatapoints" + "shortName": "GetHyperparameterTuningJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpsertDatapointsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -15801,14 +20512,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.UpsertDatapointsResponse", - "shortName": "upsert_datapoints" + "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", + "shortName": "get_hyperparameter_tuning_job" }, - "description": "Sample for UpsertDatapoints", - "file": "aiplatform_v1beta1_generated_index_service_upsert_datapoints_sync.py", + "description": "Sample for GetHyperparameterTuningJob", + "file": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_IndexService_UpsertDatapoints_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_sync", "segments": [ { "end": 51, @@ -15841,7 +20552,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_index_service_upsert_datapoints_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py" }, { "canonical": true, @@ -15851,19 +20562,19 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_model_deployment_monitoring_job", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CancelBatchPredictionJob" + "shortName": "GetModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -15882,21 +20593,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_batch_prediction_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" }, - "description": "Sample for CancelBatchPredictionJob", - "file": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_async.py", + "description": "Sample for GetModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CancelBatchPredictionJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_async", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -15911,15 +20623,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_async.py" }, { "canonical": true, @@ -15928,19 +20642,19 @@ "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CancelBatchPredictionJob" + "shortName": "GetModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -15959,21 +20673,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_batch_prediction_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", + "shortName": "get_model_deployment_monitoring_job" }, - "description": "Sample for CancelBatchPredictionJob", - "file": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_sync.py", + "description": "Sample for GetModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CancelBatchPredictionJob_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -15988,15 +20703,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": 
"RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_cancel_batch_prediction_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_sync.py" }, { "canonical": true, @@ -16006,19 +20723,19 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_custom_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetNasJob", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CancelCustomJob" + "shortName": "GetNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetNasJobRequest" }, { "name": "name", @@ -16037,21 +20754,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_custom_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.NasJob", + "shortName": "get_nas_job" }, - "description": "Sample for CancelCustomJob", - "file": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_async.py", + "description": "Sample for GetNasJob", + "file": "aiplatform_v1beta1_generated_job_service_get_nas_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CancelCustomJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetNasJob_async", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -16066,15 +20784,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + 
"start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_get_nas_job_async.py" }, { "canonical": true, @@ -16083,19 +20803,19 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_custom_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_nas_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetNasJob", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CancelCustomJob" + "shortName": "GetNasJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelCustomJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetNasJobRequest" }, { "name": "name", @@ -16114,21 +20834,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_custom_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.NasJob", + "shortName": "get_nas_job" }, - "description": "Sample for CancelCustomJob", - "file": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_sync.py", + "description": "Sample for GetNasJob", + "file": "aiplatform_v1beta1_generated_job_service_get_nas_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CancelCustomJob_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetNasJob_sync", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -16143,15 +20864,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } 
], - "title": "aiplatform_v1beta1_generated_job_service_cancel_custom_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_get_nas_job_sync.py" }, { "canonical": true, @@ -16161,19 +20884,19 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_nas_trial_detail", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetNasTrialDetail", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CancelDataLabelingJob" + "shortName": "GetNasTrialDetail" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetNasTrialDetailRequest" }, { "name": "name", @@ -16192,21 +20915,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_data_labeling_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.NasTrialDetail", + "shortName": "get_nas_trial_detail" }, - "description": "Sample for CancelDataLabelingJob", - "file": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_async.py", + "description": "Sample for GetNasTrialDetail", + "file": "aiplatform_v1beta1_generated_job_service_get_nas_trial_detail_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CancelDataLabelingJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetNasTrialDetail_async", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -16221,15 +20945,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 
46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_get_nas_trial_detail_async.py" }, { "canonical": true, @@ -16238,19 +20964,19 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_nas_trial_detail", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetNasTrialDetail", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CancelDataLabelingJob" + "shortName": "GetNasTrialDetail" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetNasTrialDetailRequest" }, { "name": "name", @@ -16269,21 +20995,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_data_labeling_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.NasTrialDetail", + "shortName": "get_nas_trial_detail" }, - "description": "Sample for CancelDataLabelingJob", - "file": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_sync.py", + "description": "Sample for GetNasTrialDetail", + "file": "aiplatform_v1beta1_generated_job_service_get_nas_trial_detail_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CancelDataLabelingJob_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_GetNasTrialDetail_sync", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, 
"type": "SHORT" }, @@ -16298,15 +21025,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_cancel_data_labeling_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_get_nas_trial_detail_sync.py" }, { "canonical": true, @@ -16316,22 +21045,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_batch_prediction_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CancelHyperparameterTuningJob" + "shortName": "ListBatchPredictionJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -16347,21 +21076,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager", + "shortName": "list_batch_prediction_jobs" }, - "description": "Sample for CancelHyperparameterTuningJob", - "file": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_async.py", + "description": "Sample for ListBatchPredictionJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py", "language": "PYTHON", 
"origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CancelHyperparameterTuningJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_async", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -16376,15 +21106,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py" }, { "canonical": true, @@ -16393,22 +21125,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_batch_prediction_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CancelHyperparameterTuningJob" + "shortName": "ListBatchPredictionJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -16424,21 +21156,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsPager", + "shortName": "list_batch_prediction_jobs" }, - "description": 
"Sample for CancelHyperparameterTuningJob", - "file": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py", + "description": "Sample for ListBatchPredictionJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CancelHyperparameterTuningJob_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_sync", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -16453,15 +21186,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_cancel_hyperparameter_tuning_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_sync.py" }, { "canonical": true, @@ -16471,22 +21206,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.cancel_nas_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_custom_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelNasJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CancelNasJob" + "shortName": "ListCustomJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelNasJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -16502,21 +21237,22 @@ "type": "Sequence[Tuple[str, str]" } ], - 
"shortName": "cancel_nas_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsAsyncPager", + "shortName": "list_custom_jobs" }, - "description": "Sample for CancelNasJob", - "file": "aiplatform_v1beta1_generated_job_service_cancel_nas_job_async.py", + "description": "Sample for ListCustomJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CancelNasJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListCustomJobs_async", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -16531,15 +21267,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_cancel_nas_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_async.py" }, { "canonical": true, @@ -16548,22 +21286,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.cancel_nas_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_custom_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CancelNasJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CancelNasJob" + "shortName": "ListCustomJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelNasJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ 
-16579,21 +21317,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_nas_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsPager", + "shortName": "list_custom_jobs" }, - "description": "Sample for CancelNasJob", - "file": "aiplatform_v1beta1_generated_job_service_cancel_nas_job_sync.py", + "description": "Sample for ListCustomJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CancelNasJob_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListCustomJobs_sync", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -16608,15 +21347,17 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_cancel_nas_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_sync.py" }, { "canonical": true, @@ -16626,28 +21367,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_data_labeling_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CreateBatchPredictionJob" + "shortName": "ListDataLabelingJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest" 
+ "type": "google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "batch_prediction_job", - "type": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -16661,22 +21398,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", - "shortName": "create_batch_prediction_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsAsyncPager", + "shortName": "list_data_labeling_jobs" }, - "description": "Sample for CreateBatchPredictionJob", - "file": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_async.py", + "description": "Sample for ListDataLabelingJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CreateBatchPredictionJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListDataLabelingJobs_async", "segments": [ { - "end": 59, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 52, "start": 27, "type": "SHORT" }, @@ -16686,22 +21423,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 53, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 54, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_async.py" }, { "canonical": true, @@ -16710,28 +21447,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceClient.create_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_data_labeling_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CreateBatchPredictionJob" + "shortName": "ListDataLabelingJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "batch_prediction_job", - "type": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -16745,22 +21478,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", - "shortName": "create_batch_prediction_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsPager", + "shortName": "list_data_labeling_jobs" }, - "description": "Sample for CreateBatchPredictionJob", - "file": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_sync.py", + "description": "Sample for ListDataLabelingJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CreateBatchPredictionJob_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListDataLabelingJobs_sync", "segments": [ { - "end": 59, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 52, "start": 27, "type": "SHORT" }, @@ -16770,22 +21503,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 53, + "end": 
45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 54, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_create_batch_prediction_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_sync.py" }, { "canonical": true, @@ -16795,28 +21528,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_custom_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_hyperparameter_tuning_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CreateCustomJob" + "shortName": "ListHyperparameterTuningJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "custom_job", - "type": "google.cloud.aiplatform_v1beta1.types.CustomJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -16830,22 +21559,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", - "shortName": "create_custom_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager", + "shortName": "list_hyperparameter_tuning_jobs" }, - "description": "Sample for CreateCustomJob", - "file": "aiplatform_v1beta1_generated_job_service_create_custom_job_async.py", + "description": "Sample for 
ListHyperparameterTuningJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CreateCustomJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListHyperparameterTuningJobs_async", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -16855,22 +21584,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_create_custom_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_async.py" }, { "canonical": true, @@ -16879,28 +21608,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_custom_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_hyperparameter_tuning_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CreateCustomJob" + "shortName": "ListHyperparameterTuningJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateCustomJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "custom_job", - "type": "google.cloud.aiplatform_v1beta1.types.CustomJob" - }, { 
"name": "retry", "type": "google.api_core.retry.Retry" @@ -16914,22 +21639,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", - "shortName": "create_custom_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsPager", + "shortName": "list_hyperparameter_tuning_jobs" }, - "description": "Sample for CreateCustomJob", - "file": "aiplatform_v1beta1_generated_job_service_create_custom_job_sync.py", + "description": "Sample for ListHyperparameterTuningJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CreateCustomJob_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListHyperparameterTuningJobs_sync", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -16939,22 +21664,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_create_custom_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py" }, { "canonical": true, @@ -16964,28 +21689,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_model_deployment_monitoring_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob", + "fullName": 
"google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CreateDataLabelingJob" + "shortName": "ListModelDeploymentMonitoringJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "data_labeling_job", - "type": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -16999,22 +21720,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", - "shortName": "create_data_labeling_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager", + "shortName": "list_model_deployment_monitoring_jobs" }, - "description": "Sample for CreateDataLabelingJob", - "file": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_async.py", + "description": "Sample for ListModelDeploymentMonitoringJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CreateDataLabelingJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListModelDeploymentMonitoringJobs_async", "segments": [ { - "end": 60, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 52, "start": 27, "type": "SHORT" }, @@ -17024,22 +21745,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 55, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 53, + "start": 
49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_async.py" }, { "canonical": true, @@ -17048,28 +21769,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_model_deployment_monitoring_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CreateDataLabelingJob" + "shortName": "ListModelDeploymentMonitoringJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "data_labeling_job", - "type": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17083,22 +21800,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", - "shortName": "create_data_labeling_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager", + "shortName": "list_model_deployment_monitoring_jobs" }, - "description": "Sample for CreateDataLabelingJob", - "file": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_sync.py", + "description": "Sample for ListModelDeploymentMonitoringJobs", + "file": 
"aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CreateDataLabelingJob_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListModelDeploymentMonitoringJobs_sync", "segments": [ { - "end": 60, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 52, "start": 27, "type": "SHORT" }, @@ -17108,22 +21825,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 55, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_create_data_labeling_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py" }, { "canonical": true, @@ -17133,28 +21850,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_nas_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListNasJobs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CreateHyperparameterTuningJob" + "shortName": "ListNasJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListNasJobsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "hyperparameter_tuning_job", - "type": 
"google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17168,22 +21881,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", - "shortName": "create_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListNasJobsAsyncPager", + "shortName": "list_nas_jobs" }, - "description": "Sample for CreateHyperparameterTuningJob", - "file": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_async.py", + "description": "Sample for ListNasJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_nas_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CreateHyperparameterTuningJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListNasJobs_async", "segments": [ { - "end": 63, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 63, + "end": 52, "start": 27, "type": "SHORT" }, @@ -17193,22 +21906,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 57, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 60, - "start": 58, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 64, - "start": 61, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_list_nas_jobs_async.py" }, { "canonical": true, @@ -17217,28 +21930,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_nas_jobs", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListNasJobs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CreateHyperparameterTuningJob" + "shortName": "ListNasJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListNasJobsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "hyperparameter_tuning_job", - "type": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17252,22 +21961,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", - "shortName": "create_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListNasJobsPager", + "shortName": "list_nas_jobs" }, - "description": "Sample for CreateHyperparameterTuningJob", - "file": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_sync.py", + "description": "Sample for ListNasJobs", + "file": "aiplatform_v1beta1_generated_job_service_list_nas_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CreateHyperparameterTuningJob_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListNasJobs_sync", "segments": [ { - "end": 63, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 63, + "end": 52, "start": 27, "type": "SHORT" }, @@ -17277,22 +21986,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 57, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 60, - "start": 58, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 64, - "start": 61, + "end": 53, + "start": 49, "type": 
"RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_create_hyperparameter_tuning_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_list_nas_jobs_sync.py" }, { "canonical": true, @@ -17302,28 +22011,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_nas_trial_details", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListNasTrialDetails", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CreateModelDeploymentMonitoringJob" + "shortName": "ListNasTrialDetails" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListNasTrialDetailsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "model_deployment_monitoring_job", - "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17337,22 +22042,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", - "shortName": "create_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListNasTrialDetailsAsyncPager", + "shortName": "list_nas_trial_details" }, - "description": "Sample for CreateModelDeploymentMonitoringJob", - "file": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_async.py", + "description": "Sample for ListNasTrialDetails", + "file": 
"aiplatform_v1beta1_generated_job_service_list_nas_trial_details_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CreateModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListNasTrialDetails_async", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -17362,22 +22067,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_list_nas_trial_details_async.py" }, { "canonical": true, @@ -17386,28 +22091,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_nas_trial_details", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListNasTrialDetails", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CreateModelDeploymentMonitoringJob" + "shortName": "ListNasTrialDetails" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListNasTrialDetailsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "model_deployment_monitoring_job", - "type": 
"google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17421,22 +22122,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", - "shortName": "create_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListNasTrialDetailsPager", + "shortName": "list_nas_trial_details" }, - "description": "Sample for CreateModelDeploymentMonitoringJob", - "file": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_sync.py", + "description": "Sample for ListNasTrialDetails", + "file": "aiplatform_v1beta1_generated_job_service_list_nas_trial_details_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CreateModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_ListNasTrialDetails_sync", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -17446,22 +22147,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_create_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_list_nas_trial_details_sync.py" }, { "canonical": true, @@ -17471,28 +22172,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.create_nas_job", + "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.pause_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateNasJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CreateNasJob" + "shortName": "PauseModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateNasJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "nas_job", - "type": "google.cloud.aiplatform_v1beta1.types.NasJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17506,22 +22203,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.NasJob", - "shortName": "create_nas_job" + "shortName": "pause_model_deployment_monitoring_job" }, - "description": "Sample for CreateNasJob", - "file": "aiplatform_v1beta1_generated_job_service_create_nas_job_async.py", + "description": "Sample for PauseModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CreateNasJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_PauseModelDeploymentMonitoringJob_async", "segments": [ { - "end": 58, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 49, "start": 27, "type": "SHORT" }, @@ -17531,22 +22227,20 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 52, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 53, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 50, "type": 
"RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_create_nas_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_async.py" }, { "canonical": true, @@ -17555,28 +22249,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.create_nas_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.pause_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.CreateNasJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "CreateNasJob" + "shortName": "PauseModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateNasJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "nas_job", - "type": "google.cloud.aiplatform_v1beta1.types.NasJob" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -17590,22 +22280,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.NasJob", - "shortName": "create_nas_job" + "shortName": "pause_model_deployment_monitoring_job" }, - "description": "Sample for CreateNasJob", - "file": "aiplatform_v1beta1_generated_job_service_create_nas_job_sync.py", + "description": "Sample for PauseModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_CreateNasJob_sync", + "regionTag": 
"aiplatform_v1beta1_generated_JobService_PauseModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 58, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 49, "start": 27, "type": "SHORT" }, @@ -17615,22 +22304,20 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 52, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 53, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_create_nas_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_sync.py" }, { "canonical": true, @@ -17640,19 +22327,19 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.resume_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "DeleteBatchPredictionJob" + "shortName": "ResumeModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -17671,22 +22358,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_batch_prediction_job" + "shortName": "resume_model_deployment_monitoring_job" }, - "description": "Sample for DeleteBatchPredictionJob", - "file": 
"aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_async.py", + "description": "Sample for ResumeModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteBatchPredictionJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_ResumeModelDeploymentMonitoringJob_async", "segments": [ { - "end": 55, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 49, "start": 27, "type": "SHORT" }, @@ -17701,17 +22387,15 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_async.py" }, { "canonical": true, @@ -17720,19 +22404,19 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.resume_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "DeleteBatchPredictionJob" + "shortName": "ResumeModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest" }, { "name": "name", @@ -17751,22 
+22435,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_batch_prediction_job" + "shortName": "resume_model_deployment_monitoring_job" }, - "description": "Sample for DeleteBatchPredictionJob", - "file": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_sync.py", + "description": "Sample for ResumeModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteBatchPredictionJob_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 55, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 49, "start": 27, "type": "SHORT" }, @@ -17781,17 +22464,15 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_delete_batch_prediction_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_sync.py" }, { "canonical": true, @@ -17801,22 +22482,26 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_custom_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.search_model_deployment_monitoring_stats_anomalies", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "DeleteCustomJob" + "shortName": 
"SearchModelDeploymentMonitoringStatsAnomalies" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" }, { - "name": "name", + "name": "model_deployment_monitoring_job", + "type": "str" + }, + { + "name": "deployed_model_id", "type": "str" }, { @@ -17832,22 +22517,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_custom_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" }, - "description": "Sample for DeleteCustomJob", - "file": "aiplatform_v1beta1_generated_job_service_delete_custom_job_async.py", + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", + "file": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteCustomJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async", "segments": [ { - "end": 55, + "end": 53, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 53, "start": 27, "type": "SHORT" }, @@ -17857,22 +22542,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 54, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_delete_custom_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py" }, { 
"canonical": true, @@ -17881,22 +22566,26 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_custom_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.search_model_deployment_monitoring_stats_anomalies", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "DeleteCustomJob" + "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteCustomJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" }, { - "name": "name", + "name": "model_deployment_monitoring_job", + "type": "str" + }, + { + "name": "deployed_model_id", "type": "str" }, { @@ -17912,22 +22601,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_custom_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager", + "shortName": "search_model_deployment_monitoring_stats_anomalies" }, - "description": "Sample for DeleteCustomJob", - "file": "aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py", + "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", + "file": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteCustomJob_sync", + "regionTag": 
"aiplatform_v1beta1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync", "segments": [ { - "end": 55, + "end": 53, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 53, "start": 27, "type": "SHORT" }, @@ -17937,22 +22626,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 54, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_delete_custom_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py" }, { "canonical": true, @@ -17962,23 +22651,27 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", "shortName": "JobServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.update_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": "DeleteDataLabelingJob" + "shortName": "UpdateModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest" }, { - "name": "name", - "type": "str" + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -17994,21 +22687,21 @@ } ], 
"resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_data_labeling_job" + "shortName": "update_model_deployment_monitoring_job" }, - "description": "Sample for DeleteDataLabelingJob", - "file": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_async.py", + "description": "Sample for UpdateModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteDataLabelingJob_async", + "regionTag": "aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_async", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -18018,22 +22711,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_async.py" + "title": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py" }, { "canonical": true, @@ -18042,23 +22735,27 @@ "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", "shortName": "JobServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.update_model_deployment_monitoring_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob", "service": { "fullName": "google.cloud.aiplatform.v1beta1.JobService", "shortName": "JobService" }, - "shortName": 
"DeleteDataLabelingJob" + "shortName": "UpdateModelDeploymentMonitoringJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest" }, { - "name": "name", - "type": "str" + "name": "model_deployment_monitoring_job", + "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -18074,21 +22771,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_data_labeling_job" + "shortName": "update_model_deployment_monitoring_job" }, - "description": "Sample for DeleteDataLabelingJob", - "file": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_sync.py", + "description": "Sample for UpdateModelDeploymentMonitoringJob", + "file": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteDataLabelingJob_sync", + "regionTag": "aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -18098,49 +22795,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_delete_data_labeling_job_sync.py" + "title": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - 
"fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.LlmUtilityServiceAsyncClient", + "shortName": "LlmUtilityServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1beta1.LlmUtilityServiceAsyncClient.compute_tokens", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1beta1.LlmUtilityService.ComputeTokens", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.LlmUtilityService", + "shortName": "LlmUtilityService" }, - "shortName": "DeleteHyperparameterTuningJob" + "shortName": "ComputeTokens" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ComputeTokensRequest" }, { - "name": "name", + "name": "endpoint", "type": "str" }, + { + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -18154,14 +22855,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.ComputeTokensResponse", + "shortName": "compute_tokens" }, - "description": "Sample for DeleteHyperparameterTuningJob", - "file": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py", + "description": "Sample for ComputeTokens", + "file": "aiplatform_v1beta1_generated_llm_utility_service_compute_tokens_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_async", + "regionTag": "aiplatform_v1beta1_generated_LlmUtilityService_ComputeTokens_async", "segments": [ { "end": 55, @@ -18179,13 +22880,13 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { "end": 52, - "start": 46, + "start": 50, "type": "REQUEST_EXECUTION" }, { @@ -18194,33 +22895,37 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_async.py" + "title": "aiplatform_v1beta1_generated_llm_utility_service_compute_tokens_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.LlmUtilityServiceClient", + "shortName": "LlmUtilityServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1beta1.LlmUtilityServiceClient.compute_tokens", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1beta1.LlmUtilityService.ComputeTokens", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.LlmUtilityService", + "shortName": "LlmUtilityService" }, - "shortName": "DeleteHyperparameterTuningJob" + "shortName": "ComputeTokens" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ComputeTokensRequest" }, { - "name": "name", + "name": "endpoint", "type": "str" }, + { + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -18234,14 
+22939,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.ComputeTokensResponse", + "shortName": "compute_tokens" }, - "description": "Sample for DeleteHyperparameterTuningJob", - "file": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_sync.py", + "description": "Sample for ComputeTokens", + "file": "aiplatform_v1beta1_generated_llm_utility_service_compute_tokens_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteHyperparameterTuningJob_sync", + "regionTag": "aiplatform_v1beta1_generated_LlmUtilityService_ComputeTokens_sync", "segments": [ { "end": 55, @@ -18259,13 +22964,13 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { "end": 52, - "start": 46, + "start": 50, "type": "REQUEST_EXECUTION" }, { @@ -18274,33 +22979,29 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_delete_hyperparameter_tuning_job_sync.py" + "title": "aiplatform_v1beta1_generated_llm_utility_service_compute_tokens_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceAsyncClient", + "shortName": "MatchServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceAsyncClient.find_neighbors", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1beta1.MatchService.FindNeighbors", "service": { - "fullName": 
"google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MatchService", + "shortName": "MatchService" }, - "shortName": "DeleteModelDeploymentMonitoringJob" + "shortName": "FindNeighbors" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.FindNeighborsRequest" }, { "name": "retry", @@ -18315,22 +23016,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.FindNeighborsResponse", + "shortName": "find_neighbors" }, - "description": "Sample for DeleteModelDeploymentMonitoringJob", - "file": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_async.py", + "description": "Sample for FindNeighbors", + "file": "aiplatform_v1beta1_generated_match_service_find_neighbors_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1beta1_generated_MatchService_FindNeighbors_async", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -18345,42 +23046,38 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1beta1_generated_match_service_find_neighbors_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceClient", + "shortName": "MatchServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceClient.find_neighbors", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1beta1.MatchService.FindNeighbors", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MatchService", + "shortName": "MatchService" }, - "shortName": "DeleteModelDeploymentMonitoringJob" + "shortName": "FindNeighbors" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelDeploymentMonitoringJobRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.FindNeighborsRequest" }, { "name": "retry", @@ -18395,22 +23092,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.FindNeighborsResponse", + "shortName": "find_neighbors" }, - "description": "Sample for DeleteModelDeploymentMonitoringJob", - "file": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_sync.py", + "description": "Sample for FindNeighbors", + "file": "aiplatform_v1beta1_generated_match_service_find_neighbors_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1beta1_generated_MatchService_FindNeighbors_sync", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - 
"end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -18425,43 +23122,39 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_delete_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1beta1_generated_match_service_find_neighbors_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceAsyncClient", + "shortName": "MatchServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.delete_nas_job", + "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceAsyncClient.read_index_datapoints", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteNasJob", + "fullName": "google.cloud.aiplatform.v1beta1.MatchService.ReadIndexDatapoints", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MatchService", + "shortName": "MatchService" }, - "shortName": "DeleteNasJob" + "shortName": "ReadIndexDatapoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteNasJobRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.ReadIndexDatapointsRequest" }, { "name": "retry", @@ -18476,22 +23169,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_nas_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadIndexDatapointsResponse", + "shortName": "read_index_datapoints" }, - "description": "Sample for DeleteNasJob", - "file": 
"aiplatform_v1beta1_generated_job_service_delete_nas_job_async.py", + "description": "Sample for ReadIndexDatapoints", + "file": "aiplatform_v1beta1_generated_match_service_read_index_datapoints_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteNasJob_async", + "regionTag": "aiplatform_v1beta1_generated_MatchService_ReadIndexDatapoints_async", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -18506,42 +23199,38 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_delete_nas_job_async.py" + "title": "aiplatform_v1beta1_generated_match_service_read_index_datapoints_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceClient", + "shortName": "MatchServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.delete_nas_job", + "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceClient.read_index_datapoints", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.DeleteNasJob", + "fullName": "google.cloud.aiplatform.v1beta1.MatchService.ReadIndexDatapoints", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MatchService", + "shortName": "MatchService" }, - "shortName": "DeleteNasJob" + "shortName": "ReadIndexDatapoints" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteNasJobRequest" - }, - { - "name": "name", - "type": "str" + "type": 
"google.cloud.aiplatform_v1beta1.types.ReadIndexDatapointsRequest" }, { "name": "retry", @@ -18556,22 +23245,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_nas_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadIndexDatapointsResponse", + "shortName": "read_index_datapoints" }, - "description": "Sample for DeleteNasJob", - "file": "aiplatform_v1beta1_generated_job_service_delete_nas_job_sync.py", + "description": "Sample for ReadIndexDatapoints", + "file": "aiplatform_v1beta1_generated_match_service_read_index_datapoints_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_DeleteNasJob_sync", + "regionTag": "aiplatform_v1beta1_generated_MatchService_ReadIndexDatapoints_sync", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -18586,44 +23275,52 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_delete_nas_job_sync.py" + "title": "aiplatform_v1beta1_generated_match_service_read_index_datapoints_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.add_context_artifacts_and_executions", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob", + "fullName": 
"google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetBatchPredictionJob" + "shortName": "AddContextArtifactsAndExecutions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest" }, { - "name": "name", + "name": "context", "type": "str" }, + { + "name": "artifacts", + "type": "MutableSequence[str]" + }, + { + "name": "executions", + "type": "MutableSequence[str]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -18637,14 +23334,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", - "shortName": "get_batch_prediction_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse", + "shortName": "add_context_artifacts_and_executions" }, - "description": "Sample for GetBatchPredictionJob", - "file": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_async.py", + "description": "Sample for AddContextArtifactsAndExecutions", + "file": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetBatchPredictionJob_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextArtifactsAndExecutions_async", "segments": [ { "end": 51, @@ -18677,33 +23374,41 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_async.py" 
}, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_batch_prediction_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.add_context_artifacts_and_executions", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetBatchPredictionJob" + "shortName": "AddContextArtifactsAndExecutions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetBatchPredictionJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest" }, { - "name": "name", + "name": "context", "type": "str" }, + { + "name": "artifacts", + "type": "MutableSequence[str]" + }, + { + "name": "executions", + "type": "MutableSequence[str]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -18717,14 +23422,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.BatchPredictionJob", - "shortName": "get_batch_prediction_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse", + "shortName": "add_context_artifacts_and_executions" }, - "description": "Sample for GetBatchPredictionJob", - "file": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_sync.py", + "description": "Sample for AddContextArtifactsAndExecutions", + "file": 
"aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetBatchPredictionJob_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextArtifactsAndExecutions_sync", "segments": [ { "end": 51, @@ -18757,34 +23462,38 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_batch_prediction_job_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_custom_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.add_context_children", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetCustomJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetCustomJob" + "shortName": "AddContextChildren" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest" }, { - "name": "name", + "name": "context", "type": "str" }, + { + "name": "child_contexts", + "type": "MutableSequence[str]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -18798,14 +23507,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1beta1.types.CustomJob", - "shortName": "get_custom_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse", + "shortName": "add_context_children" }, - "description": "Sample for GetCustomJob", - "file": "aiplatform_v1beta1_generated_job_service_get_custom_job_async.py", + "description": "Sample for AddContextChildren", + "file": "aiplatform_v1beta1_generated_metadata_service_add_context_children_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetCustomJob_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextChildren_async", "segments": [ { "end": 51, @@ -18838,33 +23547,37 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_custom_job_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_add_context_children_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_custom_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.add_context_children", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetCustomJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetCustomJob" + "shortName": "AddContextChildren" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetCustomJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest" 
}, { - "name": "name", + "name": "context", "type": "str" }, + { + "name": "child_contexts", + "type": "MutableSequence[str]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -18878,14 +23591,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.CustomJob", - "shortName": "get_custom_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse", + "shortName": "add_context_children" }, - "description": "Sample for GetCustomJob", - "file": "aiplatform_v1beta1_generated_job_service_get_custom_job_sync.py", + "description": "Sample for AddContextChildren", + "file": "aiplatform_v1beta1_generated_metadata_service_add_context_children_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetCustomJob_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextChildren_sync", "segments": [ { "end": 51, @@ -18918,34 +23631,38 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_custom_job_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_add_context_children_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_data_labeling_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.add_execution_events", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": 
"google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetDataLabelingJob" + "shortName": "AddExecutionEvents" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest" }, { - "name": "name", + "name": "execution", "type": "str" }, + { + "name": "events", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Event]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -18959,14 +23676,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", - "shortName": "get_data_labeling_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse", + "shortName": "add_execution_events" }, - "description": "Sample for GetDataLabelingJob", - "file": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_async.py", + "description": "Sample for AddExecutionEvents", + "file": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetDataLabelingJob_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddExecutionEvents_async", "segments": [ { "end": 51, @@ -18999,33 +23716,37 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_data_labeling_job", + "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceClient.add_execution_events", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetDataLabelingJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetDataLabelingJob" + "shortName": "AddExecutionEvents" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetDataLabelingJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest" }, { - "name": "name", + "name": "execution", "type": "str" }, + { + "name": "events", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Event]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -19039,14 +23760,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.DataLabelingJob", - "shortName": "get_data_labeling_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse", + "shortName": "add_execution_events" }, - "description": "Sample for GetDataLabelingJob", - "file": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_sync.py", + "description": "Sample for AddExecutionEvents", + "file": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetDataLabelingJob_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddExecutionEvents_sync", "segments": [ { "end": 51, @@ -19079,32 +23800,40 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_data_labeling_job_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_sync.py" }, { 
"canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetHyperparameterTuningJob" + "shortName": "CreateArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + }, + { + "name": "artifact_id", "type": "str" }, { @@ -19120,14 +23849,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", - "shortName": "get_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "create_artifact" }, - "description": "Sample for GetHyperparameterTuningJob", - "file": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_async.py", + "description": "Sample for CreateArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_create_artifact_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateArtifact_async", "segments": [ { "end": 51, @@ -19160,31 +23889,39 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_create_artifact_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_hyperparameter_tuning_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetHyperparameterTuningJob" + "shortName": "CreateArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetHyperparameterTuningJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "artifact", + "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + }, + { + "name": "artifact_id", "type": "str" }, { @@ -19200,14 +23937,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.HyperparameterTuningJob", - "shortName": "get_hyperparameter_tuning_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + 
"shortName": "create_artifact" }, - "description": "Sample for GetHyperparameterTuningJob", - "file": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py", + "description": "Sample for CreateArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_create_artifact_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetHyperparameterTuningJob_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateArtifact_sync", "segments": [ { "end": 51, @@ -19240,32 +23977,40 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_hyperparameter_tuning_job_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_create_artifact_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_context", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateContext", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetModelDeploymentMonitoringJob" + "shortName": "CreateContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateContextRequest" }, { - "name": "name", + "name": 
"parent", + "type": "str" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1beta1.types.Context" + }, + { + "name": "context_id", "type": "str" }, { @@ -19281,14 +24026,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", - "shortName": "get_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "create_context" }, - "description": "Sample for GetModelDeploymentMonitoringJob", - "file": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_async.py", + "description": "Sample for CreateContext", + "file": "aiplatform_v1beta1_generated_metadata_service_create_context_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateContext_async", "segments": [ { "end": 51, @@ -19321,31 +24066,39 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_create_context_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_context", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateContext", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + 
"fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetModelDeploymentMonitoringJob" + "shortName": "CreateContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateContextRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1beta1.types.Context" + }, + { + "name": "context_id", "type": "str" }, { @@ -19361,14 +24114,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob", - "shortName": "get_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "create_context" }, - "description": "Sample for GetModelDeploymentMonitoringJob", - "file": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_sync.py", + "description": "Sample for CreateContext", + "file": "aiplatform_v1beta1_generated_metadata_service_create_context_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateContext_sync", "segments": [ { "end": 51, @@ -19401,32 +24154,40 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_create_context_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_nas_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_execution", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetNasJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetNasJob" + "shortName": "CreateExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetNasJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1beta1.types.Execution" + }, + { + "name": "execution_id", "type": "str" }, { @@ -19442,14 +24203,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.NasJob", - "shortName": "get_nas_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "create_execution" }, - "description": "Sample for GetNasJob", - "file": "aiplatform_v1beta1_generated_job_service_get_nas_job_async.py", + "description": "Sample for CreateExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_create_execution_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetNasJob_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateExecution_async", "segments": [ { "end": 51, @@ -19482,31 +24243,39 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_nas_job_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_create_execution_async.py" }, { "canonical": true, "clientMethod": { "client": { - 
"fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_nas_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_execution", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetNasJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetNasJob" + "shortName": "CreateExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetNasJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "execution", + "type": "google.cloud.aiplatform_v1beta1.types.Execution" + }, + { + "name": "execution_id", "type": "str" }, { @@ -19522,14 +24291,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.NasJob", - "shortName": "get_nas_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "create_execution" }, - "description": "Sample for GetNasJob", - "file": "aiplatform_v1beta1_generated_job_service_get_nas_job_sync.py", + "description": "Sample for CreateExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_create_execution_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetNasJob_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateExecution_sync", "segments": [ { "end": 51, @@ -19562,32 +24331,40 @@ "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_job_service_get_nas_job_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_create_execution_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.get_nas_trial_detail", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_metadata_schema", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetNasTrialDetail", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetNasTrialDetail" + "shortName": "CreateMetadataSchema" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetNasTrialDetailRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "metadata_schema", + "type": "google.cloud.aiplatform_v1beta1.types.MetadataSchema" + }, + { + "name": "metadata_schema_id", "type": "str" }, { @@ -19603,22 +24380,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.NasTrialDetail", - "shortName": "get_nas_trial_detail" + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", + "shortName": "create_metadata_schema" }, - "description": "Sample for GetNasTrialDetail", - "file": "aiplatform_v1beta1_generated_job_service_get_nas_trial_detail_async.py", + "description": "Sample for CreateMetadataSchema", + "file": 
"aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetNasTrialDetail_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataSchema_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -19628,46 +24405,54 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 52, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_nas_trial_detail_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.get_nas_trial_detail", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_metadata_schema", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.GetNasTrialDetail", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "GetNasTrialDetail" + "shortName": "CreateMetadataSchema" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetNasTrialDetailRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "metadata_schema", + "type": "google.cloud.aiplatform_v1beta1.types.MetadataSchema" + }, + { + "name": "metadata_schema_id", "type": "str" }, { @@ -19682,23 +24467,23 @@ "name": "metadata", "type": "Sequence[Tuple[str, str]" } - ], - "resultType": "google.cloud.aiplatform_v1beta1.types.NasTrialDetail", - "shortName": "get_nas_trial_detail" + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", + "shortName": "create_metadata_schema" }, - "description": "Sample for GetNasTrialDetail", - "file": "aiplatform_v1beta1_generated_job_service_get_nas_trial_detail_sync.py", + "description": "Sample for CreateMetadataSchema", + "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_GetNasTrialDetail_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataSchema_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -19708,49 +24493,57 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 52, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_get_nas_trial_detail_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": 
"MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_batch_prediction_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_metadata_store", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListBatchPredictionJobs" + "shortName": "CreateMetadataStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest" }, { "name": "parent", "type": "str" }, + { + "name": "metadata_store", + "type": "google.cloud.aiplatform_v1beta1.types.MetadataStore" + }, + { + "name": "metadata_store_id", + "type": "str" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -19764,22 +24557,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsAsyncPager", - "shortName": "list_batch_prediction_jobs" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_metadata_store" }, - "description": "Sample for ListBatchPredictionJobs", - "file": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py", + "description": "Sample for CreateMetadataStore", + "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataStore_async", "segments": [ 
{ - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -19794,43 +24587,51 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_batch_prediction_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_metadata_store", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListBatchPredictionJobs" + "shortName": "CreateMetadataStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListBatchPredictionJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest" }, { "name": "parent", "type": "str" }, + { + "name": "metadata_store", + "type": "google.cloud.aiplatform_v1beta1.types.MetadataStore" + }, + { + "name": "metadata_store_id", + "type": "str" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -19844,22 +24645,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListBatchPredictionJobsPager", - "shortName": "list_batch_prediction_jobs" + "resultType": "google.api_core.operation.Operation", + "shortName": "create_metadata_store" }, - "description": "Sample for ListBatchPredictionJobs", - "file": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_sync.py", + "description": "Sample for CreateMetadataStore", + "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListBatchPredictionJobs_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataStore_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -19874,42 +24675,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_batch_prediction_jobs_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_custom_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", 
- "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListCustomJobs" + "shortName": "DeleteArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -19925,22 +24726,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsAsyncPager", - "shortName": "list_custom_jobs" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_artifact" }, - "description": "Sample for ListCustomJobs", - "file": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_async.py", + "description": "Sample for DeleteArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListCustomJobs_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteArtifact_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -19955,41 +24756,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" 
}, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_custom_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListCustomJobs" + "shortName": "DeleteArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListCustomJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20005,22 +24806,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListCustomJobsPager", - "shortName": "list_custom_jobs" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_artifact" }, - "description": "Sample for ListCustomJobs", - "file": "aiplatform_v1beta1_generated_job_service_list_custom_jobs_sync.py", + "description": "Sample for DeleteArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListCustomJobs_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteArtifact_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -20035,42 +24836,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_job_service_list_custom_jobs_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_data_labeling_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_context", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListDataLabelingJobs" + "shortName": "DeleteContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteContextRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20086,22 +24887,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsAsyncPager", - "shortName": "list_data_labeling_jobs" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_context" }, - "description": "Sample for ListDataLabelingJobs", - "file": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_async.py", + "description": "Sample for DeleteContext", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_context_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_JobService_ListDataLabelingJobs_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteContext_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -20116,41 +24917,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_delete_context_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_data_labeling_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_context", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListDataLabelingJobs" + "shortName": "DeleteContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListDataLabelingJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteContextRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20166,22 +24967,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListDataLabelingJobsPager", - 
"shortName": "list_data_labeling_jobs" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_context" }, - "description": "Sample for ListDataLabelingJobs", - "file": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_sync.py", + "description": "Sample for DeleteContext", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_context_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListDataLabelingJobs_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteContext_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -20196,42 +24997,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_data_labeling_jobs_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_delete_context_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_hyperparameter_tuning_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_execution", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": 
"MetadataService" }, - "shortName": "ListHyperparameterTuningJobs" + "shortName": "DeleteExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20247,22 +25048,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsAsyncPager", - "shortName": "list_hyperparameter_tuning_jobs" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_execution" }, - "description": "Sample for ListHyperparameterTuningJobs", - "file": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_async.py", + "description": "Sample for DeleteExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_execution_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListHyperparameterTuningJobs_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteExecution_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -20277,41 +25078,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_delete_execution_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": 
"MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_hyperparameter_tuning_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_execution", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListHyperparameterTuningJobs" + "shortName": "DeleteExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListHyperparameterTuningJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20327,22 +25128,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListHyperparameterTuningJobsPager", - "shortName": "list_hyperparameter_tuning_jobs" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_execution" }, - "description": "Sample for ListHyperparameterTuningJobs", - "file": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py", + "description": "Sample for DeleteExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_execution_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListHyperparameterTuningJobs_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteExecution_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -20357,42 +25158,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 
46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_hyperparameter_tuning_jobs_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_delete_execution_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_model_deployment_monitoring_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_metadata_store", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListModelDeploymentMonitoringJobs" + "shortName": "DeleteMetadataStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20408,22 +25209,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsAsyncPager", - "shortName": "list_model_deployment_monitoring_jobs" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_metadata_store" }, - "description": "Sample for ListModelDeploymentMonitoringJobs", - "file": 
"aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_async.py", + "description": "Sample for DeleteMetadataStore", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListModelDeploymentMonitoringJobs_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteMetadataStore_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -20438,41 +25239,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_model_deployment_monitoring_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_metadata_store", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListModelDeploymentMonitoringJobs", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListModelDeploymentMonitoringJobs" + "shortName": "DeleteMetadataStore" }, "parameters": [ { "name": 
"request", - "type": "google.cloud.aiplatform_v1beta1.types.ListModelDeploymentMonitoringJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20488,22 +25289,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListModelDeploymentMonitoringJobsPager", - "shortName": "list_model_deployment_monitoring_jobs" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_metadata_store" }, - "description": "Sample for ListModelDeploymentMonitoringJobs", - "file": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py", + "description": "Sample for DeleteMetadataStore", + "file": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListModelDeploymentMonitoringJobs_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteMetadataStore_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -20518,42 +25319,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_model_deployment_monitoring_jobs_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_nas_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListNasJobs", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListNasJobs" + "shortName": "GetArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListNasJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetArtifactRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20569,22 +25370,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListNasJobsAsyncPager", - "shortName": "list_nas_jobs" + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "get_artifact" }, - "description": "Sample for ListNasJobs", - "file": "aiplatform_v1beta1_generated_job_service_list_nas_jobs_async.py", + "description": "Sample for GetArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_get_artifact_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListNasJobs_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetArtifact_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -20604,36 +25405,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_nas_jobs_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_get_artifact_async.py" }, { 
"canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_nas_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListNasJobs", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListNasJobs" + "shortName": "GetArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListNasJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetArtifactRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20649,22 +25450,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListNasJobsPager", - "shortName": "list_nas_jobs" + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "get_artifact" }, - "description": "Sample for ListNasJobs", - "file": "aiplatform_v1beta1_generated_job_service_list_nas_jobs_sync.py", + "description": "Sample for GetArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_get_artifact_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListNasJobs_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetArtifact_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -20684,37 +25485,37 @@ "type": 
"REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_nas_jobs_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_get_artifact_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.list_nas_trial_details", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_context", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListNasTrialDetails", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetContext", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListNasTrialDetails" + "shortName": "GetContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListNasTrialDetailsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetContextRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20730,22 +25531,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListNasTrialDetailsAsyncPager", - "shortName": "list_nas_trial_details" + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "get_context" }, - "description": "Sample for ListNasTrialDetails", - "file": "aiplatform_v1beta1_generated_job_service_list_nas_trial_details_async.py", + "description": "Sample for GetContext", + "file": "aiplatform_v1beta1_generated_metadata_service_get_context_async.py", 
"language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListNasTrialDetails_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetContext_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -20765,36 +25566,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_nas_trial_details_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_get_context_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.list_nas_trial_details", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_context", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ListNasTrialDetails", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetContext", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ListNasTrialDetails" + "shortName": "GetContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListNasTrialDetailsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetContextRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -20810,22 +25611,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.ListNasTrialDetailsPager", - "shortName": "list_nas_trial_details" + "resultType": 
"google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "get_context" }, - "description": "Sample for ListNasTrialDetails", - "file": "aiplatform_v1beta1_generated_job_service_list_nas_trial_details_sync.py", + "description": "Sample for GetContext", + "file": "aiplatform_v1beta1_generated_metadata_service_get_context_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ListNasTrialDetails_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetContext_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -20845,34 +25646,34 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_list_nas_trial_details_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_get_context_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.pause_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_execution", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetExecution", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "PauseModelDeploymentMonitoringJob" + "shortName": "GetExecution" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetExecutionRequest" }, { "name": "name", @@ -20891,21 +25692,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "pause_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "get_execution" }, - "description": "Sample for PauseModelDeploymentMonitoringJob", - "file": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_async.py", + "description": "Sample for GetExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_get_execution_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_PauseModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetExecution_async", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -20920,36 +25722,38 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_get_execution_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.pause_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_execution", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.PauseModelDeploymentMonitoringJob", + 
"fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetExecution", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "PauseModelDeploymentMonitoringJob" + "shortName": "GetExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.PauseModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetExecutionRequest" }, { "name": "name", @@ -20968,21 +25772,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "pause_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "get_execution" }, - "description": "Sample for PauseModelDeploymentMonitoringJob", - "file": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_sync.py", + "description": "Sample for GetExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_get_execution_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_PauseModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetExecution_sync", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -20997,37 +25802,39 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_pause_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_get_execution_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": 
"JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.resume_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_metadata_schema", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ResumeModelDeploymentMonitoringJob" + "shortName": "GetMetadataSchema" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest" }, { "name": "name", @@ -21046,21 +25853,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "resume_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", + "shortName": "get_metadata_schema" }, - "description": "Sample for ResumeModelDeploymentMonitoringJob", - "file": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_async.py", + "description": "Sample for GetMetadataSchema", + "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ResumeModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataSchema_async", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -21075,36 +25883,38 @@ 
"type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.resume_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_metadata_schema", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.ResumeModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ResumeModelDeploymentMonitoringJob" + "shortName": "GetMetadataSchema" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ResumeModelDeploymentMonitoringJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest" }, { "name": "name", @@ -21123,21 +25933,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "resume_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", + "shortName": "get_metadata_schema" }, - "description": "Sample for ResumeModelDeploymentMonitoringJob", - "file": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_sync.py", + "description": "Sample for GetMetadataSchema", + "file": 
"aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_ResumeModelDeploymentMonitoringJob_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataSchema_sync", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -21152,44 +25963,42 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_resume_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.search_model_deployment_monitoring_stats_anomalies", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_metadata_store", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" + "shortName": "GetMetadataStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" - }, - { 
- "name": "model_deployment_monitoring_job", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest" }, { - "name": "deployed_model_id", + "name": "name", "type": "str" }, { @@ -21205,22 +26014,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesAsyncPager", - "shortName": "search_model_deployment_monitoring_stats_anomalies" + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataStore", + "shortName": "get_metadata_store" }, - "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", - "file": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py", + "description": "Sample for GetMetadataStore", + "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataStore_async", "segments": [ { - "end": 53, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 53, + "end": 51, "start": 27, "type": "SHORT" }, @@ -21230,50 +26039,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 54, - "start": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.search_model_deployment_monitoring_stats_anomalies", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_metadata_store", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.SearchModelDeploymentMonitoringStatsAnomalies", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "SearchModelDeploymentMonitoringStatsAnomalies" + "shortName": "GetMetadataStore" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.SearchModelDeploymentMonitoringStatsAnomaliesRequest" - }, - { - "name": "model_deployment_monitoring_job", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest" }, { - "name": "deployed_model_id", + "name": "name", "type": "str" }, { @@ -21289,22 +26094,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.job_service.pagers.SearchModelDeploymentMonitoringStatsAnomaliesPager", - "shortName": "search_model_deployment_monitoring_stats_anomalies" + "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataStore", + "shortName": "get_metadata_store" }, - "description": "Sample for SearchModelDeploymentMonitoringStatsAnomalies", - "file": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py", + "description": "Sample for GetMetadataStore", + "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_JobService_SearchModelDeploymentMonitoringStatsAnomalies_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataStore_sync", "segments": [ { - "end": 53, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 53, + "end": 51, "start": 27, "type": "SHORT" }, @@ -21314,52 +26119,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 54, - "start": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_search_model_deployment_monitoring_stats_anomalies_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient", - "shortName": "JobServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceAsyncClient.update_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_artifacts", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "UpdateModelDeploymentMonitoringJob" + "shortName": "ListArtifacts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest" - }, - { - "name": "model_deployment_monitoring_job", - "type": 
"google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + "type": "google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "parent", + "type": "str" }, { "name": "retry", @@ -21374,22 +26175,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsAsyncPager", + "shortName": "list_artifacts" }, - "description": "Sample for UpdateModelDeploymentMonitoringJob", - "file": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py", + "description": "Sample for ListArtifacts", + "file": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListArtifacts_async", "segments": [ { - "end": 59, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 52, "start": 27, "type": "SHORT" }, @@ -21399,51 +26200,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient", - "shortName": "JobServiceClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.JobServiceClient.update_model_deployment_monitoring_job", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_artifacts", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService.UpdateModelDeploymentMonitoringJob", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.JobService", - "shortName": "JobService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "UpdateModelDeploymentMonitoringJob" + "shortName": "ListArtifacts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelDeploymentMonitoringJobRequest" - }, - { - "name": "model_deployment_monitoring_job", - "type": "google.cloud.aiplatform_v1beta1.types.ModelDeploymentMonitoringJob" + "type": "google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "parent", + "type": "str" }, { "name": "retry", @@ -21458,22 +26255,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_model_deployment_monitoring_job" + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsPager", + "shortName": "list_artifacts" }, - "description": "Sample for UpdateModelDeploymentMonitoringJob", - "file": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_sync.py", + "description": "Sample for ListArtifacts", + "file": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_JobService_UpdateModelDeploymentMonitoringJob_sync", + 
"regionTag": "aiplatform_v1beta1_generated_MetadataService_ListArtifacts_sync", "segments": [ { - "end": 59, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 52, "start": 27, "type": "SHORT" }, @@ -21483,53 +26280,49 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_job_service_update_model_deployment_monitoring_job_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.LlmUtilityServiceAsyncClient", - "shortName": "LlmUtilityServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.LlmUtilityServiceAsyncClient.compute_tokens", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_contexts", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.LlmUtilityService.ComputeTokens", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListContexts", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.LlmUtilityService", - "shortName": "LlmUtilityService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ComputeTokens" + "shortName": "ListContexts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ComputeTokensRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListContextsRequest" }, { - "name": "endpoint", + "name": "parent", "type": "str" }, - { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" - }, 
{ "name": "retry", "type": "google.api_core.retry.Retry" @@ -21543,22 +26336,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ComputeTokensResponse", - "shortName": "compute_tokens" + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsAsyncPager", + "shortName": "list_contexts" }, - "description": "Sample for ComputeTokens", - "file": "aiplatform_v1beta1_generated_llm_utility_service_compute_tokens_async.py", + "description": "Sample for ListContexts", + "file": "aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_LlmUtilityService_ComputeTokens_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListContexts_async", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -21568,52 +26361,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_llm_utility_service_compute_tokens_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.LlmUtilityServiceClient", - "shortName": "LlmUtilityServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.LlmUtilityServiceClient.compute_tokens", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_contexts", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.LlmUtilityService.ComputeTokens", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListContexts", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.LlmUtilityService", - "shortName": "LlmUtilityService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ComputeTokens" + "shortName": "ListContexts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ComputeTokensRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListContextsRequest" }, { - "name": "endpoint", + "name": "parent", "type": "str" }, - { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -21627,22 +26416,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ComputeTokensResponse", - "shortName": "compute_tokens" + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsPager", + "shortName": "list_contexts" }, - "description": "Sample for ComputeTokens", - "file": "aiplatform_v1beta1_generated_llm_utility_service_compute_tokens_sync.py", + "description": "Sample for ListContexts", + "file": "aiplatform_v1beta1_generated_metadata_service_list_contexts_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_LlmUtilityService_ComputeTokens_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListContexts_sync", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -21652,44 +26441,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 
53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_llm_utility_service_compute_tokens_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_list_contexts_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceAsyncClient", - "shortName": "MatchServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceAsyncClient.find_neighbors", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_executions", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MatchService.FindNeighbors", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MatchService", - "shortName": "MatchService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "FindNeighbors" + "shortName": "ListExecutions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.FindNeighborsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest" + }, + { + "name": "parent", + "type": "str" }, { "name": "retry", @@ -21704,22 +26497,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.FindNeighborsResponse", - "shortName": "find_neighbors" + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsAsyncPager", + "shortName": "list_executions" }, - "description": "Sample for FindNeighbors", - "file": "aiplatform_v1beta1_generated_match_service_find_neighbors_async.py", + "description": "Sample for ListExecutions", + "file": "aiplatform_v1beta1_generated_metadata_service_list_executions_async.py", "language": "PYTHON", "origin": 
"API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MatchService_FindNeighbors_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListExecutions_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -21739,33 +26532,37 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_match_service_find_neighbors_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_list_executions_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceClient", - "shortName": "MatchServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceClient.find_neighbors", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_executions", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MatchService.FindNeighbors", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MatchService", - "shortName": "MatchService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "FindNeighbors" + "shortName": "ListExecutions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.FindNeighborsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest" + }, + { + "name": "parent", + "type": "str" }, { "name": "retry", @@ -21780,22 +26577,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.FindNeighborsResponse", - "shortName": "find_neighbors" + "resultType": 
"google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsPager", + "shortName": "list_executions" }, - "description": "Sample for FindNeighbors", - "file": "aiplatform_v1beta1_generated_match_service_find_neighbors_sync.py", + "description": "Sample for ListExecutions", + "file": "aiplatform_v1beta1_generated_metadata_service_list_executions_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MatchService_FindNeighbors_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListExecutions_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -21815,34 +26612,38 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_match_service_find_neighbors_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_list_executions_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceAsyncClient", - "shortName": "MatchServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceAsyncClient.read_index_datapoints", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_metadata_schemas", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MatchService.ReadIndexDatapoints", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MatchService", - "shortName": "MatchService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ReadIndexDatapoints" + "shortName": "ListMetadataSchemas" }, "parameters": [ { 
"name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ReadIndexDatapointsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest" + }, + { + "name": "parent", + "type": "str" }, { "name": "retry", @@ -21857,22 +26658,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ReadIndexDatapointsResponse", - "shortName": "read_index_datapoints" + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager", + "shortName": "list_metadata_schemas" }, - "description": "Sample for ReadIndexDatapoints", - "file": "aiplatform_v1beta1_generated_match_service_read_index_datapoints_async.py", + "description": "Sample for ListMetadataSchemas", + "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MatchService_ReadIndexDatapoints_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataSchemas_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -21892,33 +26693,37 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_match_service_read_index_datapoints_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceClient", - "shortName": "MatchServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MatchServiceClient.read_index_datapoints", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_metadata_schemas", "method": { - 
"fullName": "google.cloud.aiplatform.v1beta1.MatchService.ReadIndexDatapoints", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MatchService", - "shortName": "MatchService" + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" }, - "shortName": "ReadIndexDatapoints" + "shortName": "ListMetadataSchemas" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ReadIndexDatapointsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest" + }, + { + "name": "parent", + "type": "str" }, { "name": "retry", @@ -21933,22 +26738,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ReadIndexDatapointsResponse", - "shortName": "read_index_datapoints" + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasPager", + "shortName": "list_metadata_schemas" }, - "description": "Sample for ReadIndexDatapoints", - "file": "aiplatform_v1beta1_generated_match_service_read_index_datapoints_sync.py", + "description": "Sample for ListMetadataSchemas", + "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MatchService_ReadIndexDatapoints_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataSchemas_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -21968,12 +26773,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_match_service_read_index_datapoints_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_sync.py" }, { 
"canonical": true, @@ -21983,32 +26788,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.add_context_artifacts_and_executions", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_metadata_stores", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "AddContextArtifactsAndExecutions" + "shortName": "ListMetadataStores" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest" }, { - "name": "context", + "name": "parent", "type": "str" }, - { - "name": "artifacts", - "type": "MutableSequence[str]" - }, - { - "name": "executions", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -22022,22 +26819,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse", - "shortName": "add_context_artifacts_and_executions" + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresAsyncPager", + "shortName": "list_metadata_stores" }, - "description": "Sample for AddContextArtifactsAndExecutions", - "file": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_async.py", + "description": "Sample for ListMetadataStores", + "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_MetadataService_AddContextArtifactsAndExecutions_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataStores_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -22057,12 +26854,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py" }, { "canonical": true, @@ -22071,32 +26868,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.add_context_artifacts_and_executions", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_metadata_stores", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextArtifactsAndExecutions", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "AddContextArtifactsAndExecutions" + "shortName": "ListMetadataStores" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest" }, { - "name": "context", + "name": "parent", "type": "str" }, - { - "name": "artifacts", - "type": "MutableSequence[str]" - }, - { - "name": "executions", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -22110,22 +26899,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1beta1.types.AddContextArtifactsAndExecutionsResponse", - "shortName": "add_context_artifacts_and_executions" + "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresPager", + "shortName": "list_metadata_stores" }, - "description": "Sample for AddContextArtifactsAndExecutions", - "file": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_sync.py", + "description": "Sample for ListMetadataStores", + "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextArtifactsAndExecutions_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataStores_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -22145,12 +26934,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_add_context_artifacts_and_executions_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_sync.py" }, { "canonical": true, @@ -22160,28 +26949,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.add_context_children", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.purge_artifacts", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "AddContextChildren" + "shortName": "PurgeArtifacts" }, 
"parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest" + "type": "google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest" }, { - "name": "context", + "name": "parent", "type": "str" }, - { - "name": "child_contexts", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -22195,22 +26980,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse", - "shortName": "add_context_children" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_artifacts" }, - "description": "Sample for AddContextChildren", - "file": "aiplatform_v1beta1_generated_metadata_service_add_context_children_async.py", + "description": "Sample for PurgeArtifacts", + "file": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextChildren_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeArtifacts_async", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -22220,22 +27005,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_add_context_children_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_async.py" }, { "canonical": true, @@ -22244,28 +27029,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceClient.add_context_children", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.purge_artifacts", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddContextChildren", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "AddContextChildren" + "shortName": "PurgeArtifacts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenRequest" + "type": "google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest" }, { - "name": "context", + "name": "parent", "type": "str" }, - { - "name": "child_contexts", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -22279,22 +27060,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.AddContextChildrenResponse", - "shortName": "add_context_children" + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_artifacts" }, - "description": "Sample for AddContextChildren", - "file": "aiplatform_v1beta1_generated_metadata_service_add_context_children_sync.py", + "description": "Sample for PurgeArtifacts", + "file": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddContextChildren_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeArtifacts_sync", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -22304,22 +27085,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": 
"REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_add_context_children_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_sync.py" }, { "canonical": true, @@ -22329,28 +27110,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.add_execution_events", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.purge_contexts", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "AddExecutionEvents" + "shortName": "PurgeContexts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest" }, { - "name": "execution", + "name": "parent", "type": "str" }, - { - "name": "events", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Event]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -22364,22 +27141,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse", - "shortName": "add_execution_events" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_contexts" }, - "description": "Sample for AddExecutionEvents", - "file": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_async.py", + "description": "Sample for PurgeContexts", + "file": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_async.py", "language": "PYTHON", "origin": 
"API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddExecutionEvents_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeContexts_async", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -22389,22 +27166,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_async.py" }, { "canonical": true, @@ -22413,28 +27190,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.add_execution_events", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.purge_contexts", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.AddExecutionEvents", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "AddExecutionEvents" + "shortName": "PurgeContexts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.AddExecutionEventsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest" }, { - "name": "execution", + "name": "parent", "type": "str" }, - { - "name": "events", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Event]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -22448,22 +27221,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1beta1.types.AddExecutionEventsResponse", - "shortName": "add_execution_events" + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_contexts" }, - "description": "Sample for AddExecutionEvents", - "file": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_sync.py", + "description": "Sample for PurgeContexts", + "file": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_AddExecutionEvents_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeContexts_sync", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -22473,22 +27246,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_add_execution_events_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_sync.py" }, { "canonical": true, @@ -22498,32 +27271,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_artifact", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.purge_executions", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateArtifact" + "shortName": 
"PurgeExecutions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest" + "type": "google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "artifact", - "type": "google.cloud.aiplatform_v1beta1.types.Artifact" - }, - { - "name": "artifact_id", - "type": "str" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -22537,22 +27302,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", - "shortName": "create_artifact" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "purge_executions" }, - "description": "Sample for CreateArtifact", - "file": "aiplatform_v1beta1_generated_metadata_service_create_artifact_async.py", + "description": "Sample for PurgeExecutions", + "file": "aiplatform_v1beta1_generated_metadata_service_purge_executions_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateArtifact_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeExecutions_async", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -22562,22 +27327,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_create_artifact_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_purge_executions_async.py" }, { "canonical": true, @@ -22586,30 +27351,103 @@ "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", "shortName": "MetadataServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_artifact", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.purge_executions", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateArtifact", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateArtifact" + "shortName": "PurgeExecutions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateArtifactRequest" + "type": "google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest" }, { "name": "parent", "type": "str" }, { - "name": "artifact", - "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + "name": "retry", + "type": "google.api_core.retry.Retry" }, { - "name": "artifact_id", + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "purge_executions" + }, + "description": "Sample for PurgeExecutions", + "file": "aiplatform_v1beta1_generated_metadata_service_purge_executions_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeExecutions_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_metadata_service_purge_executions_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.query_artifact_lineage_subgraph", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", + "shortName": "MetadataService" + }, + "shortName": "QueryArtifactLineageSubgraph" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest" + }, + { + "name": "artifact", "type": "str" }, { @@ -22625,14 +27463,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", - "shortName": "create_artifact" + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_artifact_lineage_subgraph" }, - "description": "Sample for CreateArtifact", - "file": "aiplatform_v1beta1_generated_metadata_service_create_artifact_sync.py", + "description": "Sample for QueryArtifactLineageSubgraph", + "file": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateArtifact_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryArtifactLineageSubgraph_async", "segments": [ { "end": 51, @@ -22665,40 +27503,31 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_create_artifact_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_context", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.query_artifact_lineage_subgraph", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateContext", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateContext" + "shortName": "QueryArtifactLineageSubgraph" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateContextRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "context", - "type": "google.cloud.aiplatform_v1beta1.types.Context" + "type": "google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest" }, { - "name": "context_id", + "name": "artifact", "type": "str" }, { @@ -22714,14 +27543,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Context", - "shortName": "create_context" + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_artifact_lineage_subgraph" }, - "description": "Sample for CreateContext", - "file": "aiplatform_v1beta1_generated_metadata_service_create_context_async.py", + "description": "Sample for QueryArtifactLineageSubgraph", + "file": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateContext_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryArtifactLineageSubgraph_sync", "segments": [ { "end": 51, @@ -22754,39 +27583,32 @@ "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_metadata_service_create_context_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_context", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.query_context_lineage_subgraph", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateContext", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateContext" + "shortName": "QueryContextLineageSubgraph" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateContextRequest" - }, - { - "name": "parent", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest" }, { "name": "context", - "type": "google.cloud.aiplatform_v1beta1.types.Context" - }, - { - "name": "context_id", "type": "str" }, { @@ -22802,14 +27624,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Context", - "shortName": "create_context" + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_context_lineage_subgraph" }, - "description": "Sample for CreateContext", - "file": "aiplatform_v1beta1_generated_metadata_service_create_context_sync.py", + "description": "Sample for QueryContextLineageSubgraph", + "file": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_async.py", "language": 
"PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateContext_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryContextLineageSubgraph_async", "segments": [ { "end": 51, @@ -22842,40 +27664,31 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_create_context_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_execution", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.query_context_lineage_subgraph", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateExecution" + "shortName": "QueryContextLineageSubgraph" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "execution", - "type": "google.cloud.aiplatform_v1beta1.types.Execution" + "type": "google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest" }, { - "name": "execution_id", + "name": "context", "type": "str" }, { @@ -22891,14 +27704,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", - "shortName": "create_execution" + "resultType": 
"google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_context_lineage_subgraph" }, - "description": "Sample for CreateExecution", - "file": "aiplatform_v1beta1_generated_metadata_service_create_execution_async.py", + "description": "Sample for QueryContextLineageSubgraph", + "file": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateExecution_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryContextLineageSubgraph_sync", "segments": [ { "end": 51, @@ -22931,39 +27744,32 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_create_execution_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_execution", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.query_execution_inputs_and_outputs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateExecution", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateExecution" + "shortName": "QueryExecutionInputsAndOutputs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateExecutionRequest" - }, - { - "name": "parent", - "type": "str" + "type": 
"google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest" }, { "name": "execution", - "type": "google.cloud.aiplatform_v1beta1.types.Execution" - }, - { - "name": "execution_id", "type": "str" }, { @@ -22979,14 +27785,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", - "shortName": "create_execution" + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_execution_inputs_and_outputs" }, - "description": "Sample for CreateExecution", - "file": "aiplatform_v1beta1_generated_metadata_service_create_execution_sync.py", + "description": "Sample for QueryExecutionInputsAndOutputs", + "file": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateExecution_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryExecutionInputsAndOutputs_async", "segments": [ { "end": 51, @@ -23019,40 +27825,31 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_create_execution_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_metadata_schema", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.query_execution_inputs_and_outputs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema", + "fullName": 
"google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateMetadataSchema" + "shortName": "QueryExecutionInputsAndOutputs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "metadata_schema", - "type": "google.cloud.aiplatform_v1beta1.types.MetadataSchema" + "type": "google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest" }, { - "name": "metadata_schema_id", + "name": "execution", "type": "str" }, { @@ -23068,22 +27865,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", - "shortName": "create_metadata_schema" + "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", + "shortName": "query_execution_inputs_and_outputs" }, - "description": "Sample for CreateMetadataSchema", - "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_async.py", + "description": "Sample for QueryExecutionInputsAndOutputs", + "file": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataSchema_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryExecutionInputsAndOutputs_sync", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -23093,55 +27890,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": 
"RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_metadata_schema", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.remove_context_children", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataSchema", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.RemoveContextChildren", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateMetadataSchema" + "shortName": "RemoveContextChildren" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateMetadataSchemaRequest" + "type": "google.cloud.aiplatform_v1beta1.types.RemoveContextChildrenRequest" }, { - "name": "parent", + "name": "context", "type": "str" }, { - "name": "metadata_schema", - "type": "google.cloud.aiplatform_v1beta1.types.MetadataSchema" - }, - { - "name": "metadata_schema_id", - "type": "str" + "name": "child_contexts", + "type": "MutableSequence[str]" }, { "name": "retry", @@ -23156,22 +27950,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", - "shortName": "create_metadata_schema" + "resultType": "google.cloud.aiplatform_v1beta1.types.RemoveContextChildrenResponse", + "shortName": "remove_context_children" }, - "description": "Sample for CreateMetadataSchema", - "file": 
"aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_sync.py", + "description": "Sample for RemoveContextChildren", + "file": "aiplatform_v1beta1_generated_metadata_service_remove_context_children_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataSchema_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_RemoveContextChildren_async", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -23181,56 +27975,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_schema_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_remove_context_children_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.create_metadata_store", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.remove_context_children", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.RemoveContextChildren", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateMetadataStore" + "shortName": "RemoveContextChildren" }, "parameters": [ { "name": "request", - 
"type": "google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.RemoveContextChildrenRequest" }, { - "name": "parent", + "name": "context", "type": "str" }, { - "name": "metadata_store", - "type": "google.cloud.aiplatform_v1beta1.types.MetadataStore" - }, - { - "name": "metadata_store_id", - "type": "str" + "name": "child_contexts", + "type": "MutableSequence[str]" }, { "name": "retry", @@ -23245,22 +28034,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_metadata_store" + "resultType": "google.cloud.aiplatform_v1beta1.types.RemoveContextChildrenResponse", + "shortName": "remove_context_children" }, - "description": "Sample for CreateMetadataStore", - "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_async.py", + "description": "Sample for RemoveContextChildren", + "file": "aiplatform_v1beta1_generated_metadata_service_remove_context_children_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataStore_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_RemoveContextChildren_sync", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -23275,50 +28064,47 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_remove_context_children_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.create_metadata_store", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.update_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.CreateMetadataStore", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "CreateMetadataStore" + "shortName": "UpdateArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateMetadataStoreRequest" - }, - { - "name": "parent", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest" }, { - "name": "metadata_store", - "type": "google.cloud.aiplatform_v1beta1.types.MetadataStore" + "name": "artifact", + "type": "google.cloud.aiplatform_v1beta1.types.Artifact" }, { - "name": "metadata_store_id", - "type": "str" + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -23333,22 +28119,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_metadata_store" + "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "update_artifact" }, - "description": "Sample for CreateMetadataStore", - "file": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_sync.py", + "description": "Sample for UpdateArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_update_artifact_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_CreateMetadataStore_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateArtifact_async", "segments": [ { - "end": 55, + 
"end": 50, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 50, "start": 27, "type": "SHORT" }, @@ -23358,48 +28144,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 47, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 51, + "start": 48, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_create_metadata_store_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_update_artifact_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_artifact", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.update_artifact", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteArtifact" + "shortName": "UpdateArtifact" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest" }, { - "name": "name", - "type": "str" + "name": "artifact", + "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -23414,22 +28203,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_artifact" 
+ "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", + "shortName": "update_artifact" }, - "description": "Sample for DeleteArtifact", - "file": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_async.py", + "description": "Sample for UpdateArtifact", + "file": "aiplatform_v1beta1_generated_metadata_service_update_artifact_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteArtifact_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateArtifact_sync", "segments": [ { - "end": 55, + "end": 50, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 50, "start": 27, "type": "SHORT" }, @@ -23439,47 +28228,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 47, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 51, + "start": 48, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_update_artifact_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_artifact", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.update_context", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteArtifact", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteArtifact" + 
"shortName": "UpdateContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteArtifactRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateContextRequest" + }, + { + "name": "context", + "type": "google.cloud.aiplatform_v1beta1.types.Context" }, { - "name": "name", - "type": "str" + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -23494,22 +28288,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_artifact" + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "update_context" }, - "description": "Sample for DeleteArtifact", - "file": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_sync.py", + "description": "Sample for UpdateContext", + "file": "aiplatform_v1beta1_generated_metadata_service_update_context_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteArtifact_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateContext_async", "segments": [ { - "end": 55, + "end": 50, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 50, "start": 27, "type": "SHORT" }, @@ -23519,48 +28313,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 47, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 51, + "start": 48, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_delete_artifact_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_update_context_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_context", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.update_context", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteContext" + "shortName": "UpdateContext" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteContextRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateContextRequest" }, { - "name": "name", - "type": "str" + "name": "context", + "type": "google.cloud.aiplatform_v1beta1.types.Context" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -23575,22 +28372,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_context" + "resultType": "google.cloud.aiplatform_v1beta1.types.Context", + "shortName": "update_context" }, - "description": "Sample for DeleteContext", - "file": "aiplatform_v1beta1_generated_metadata_service_delete_context_async.py", + "description": "Sample for UpdateContext", + "file": "aiplatform_v1beta1_generated_metadata_service_update_context_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteContext_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateContext_sync", "segments": [ { - "end": 55, + "end": 50, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 50, "start": 27, "type": "SHORT" }, @@ -23600,47 +28397,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 44, "start": 41, 
"type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 47, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 51, + "start": 48, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_delete_context_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_update_context_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", + "shortName": "MetadataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_context", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.update_execution", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteContext", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteContext" + "shortName": "UpdateExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteContextRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest" }, { - "name": "name", - "type": "str" + "name": "execution", + "type": "google.cloud.aiplatform_v1beta1.types.Execution" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -23655,22 +28457,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_context" + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "update_execution" }, - "description": "Sample for DeleteContext", - "file": 
"aiplatform_v1beta1_generated_metadata_service_delete_context_sync.py", + "description": "Sample for UpdateExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_update_execution_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteContext_sync", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateExecution_async", "segments": [ { - "end": 55, + "end": 50, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 50, "start": 27, "type": "SHORT" }, @@ -23680,48 +28482,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 47, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 51, + "start": 48, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_delete_context_sync.py" + "title": "aiplatform_v1beta1_generated_metadata_service_update_execution_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", + "shortName": "MetadataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_execution", + "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.update_execution", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution", + "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution", "service": { "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", "shortName": "MetadataService" }, - "shortName": "DeleteExecution" + "shortName": "UpdateExecution" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest" }, { - "name": "name", - "type": "str" + "name": "execution", + "type": "google.cloud.aiplatform_v1beta1.types.Execution" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -23736,22 +28541,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_execution" + "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", + "shortName": "update_execution" }, - "description": "Sample for DeleteExecution", - "file": "aiplatform_v1beta1_generated_metadata_service_delete_execution_async.py", + "description": "Sample for UpdateExecution", + "file": "aiplatform_v1beta1_generated_metadata_service_update_execution_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteExecution_async", + "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateExecution_sync", "segments": [ { - "end": 55, + "end": 50, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 50, "start": 27, "type": "SHORT" }, @@ -23761,48 +28566,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 47, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 51, + "start": 48, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_delete_execution_async.py" + "title": "aiplatform_v1beta1_generated_metadata_service_update_execution_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient", + "shortName": "MigrationServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_execution", + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient.batch_migrate_resources", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteExecution", + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", + "shortName": "MigrationService" }, - "shortName": "DeleteExecution" + "shortName": "BatchMigrateResources" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteExecutionRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "migrate_resource_requests", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -23816,22 +28626,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_execution" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_migrate_resources" }, - "description": "Sample for DeleteExecution", - "file": "aiplatform_v1beta1_generated_metadata_service_delete_execution_sync.py", + "description": "Sample for BatchMigrateResources", + "file": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteExecution_sync", + "regionTag": "aiplatform_v1beta1_generated_MigrationService_BatchMigrateResources_async", "segments": [ { - "end": 55, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 61, "start": 27, "type": "SHORT" }, @@ 
-23841,49 +28651,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_delete_execution_sync.py" + "title": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient", + "shortName": "MigrationServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.delete_metadata_store", + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient.batch_migrate_resources", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore", + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", + "shortName": "MigrationService" }, - "shortName": "DeleteMetadataStore" + "shortName": "BatchMigrateResources" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "migrate_resource_requests", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -23897,22 +28710,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.api_core.operation_async.AsyncOperation", - "shortName": "delete_metadata_store" + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_migrate_resources" }, - "description": "Sample for DeleteMetadataStore", - "file": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_async.py", + "description": "Sample for BatchMigrateResources", + "file": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteMetadataStore_async", + "regionTag": "aiplatform_v1beta1_generated_MigrationService_BatchMigrateResources_sync", "segments": [ { - "end": 55, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 61, "start": 27, "type": "SHORT" }, @@ -23922,46 +28735,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_async.py" + "title": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient", + "shortName": "MigrationServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.delete_metadata_store", + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient.search_migratable_resources", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.DeleteMetadataStore", + "fullName": 
"google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", + "shortName": "MigrationService" }, - "shortName": "DeleteMetadataStore" + "shortName": "SearchMigratableResources" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteMetadataStoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -23977,22 +28791,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_metadata_store" + "resultType": "google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager", + "shortName": "search_migratable_resources" }, - "description": "Sample for DeleteMetadataStore", - "file": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_sync.py", + "description": "Sample for SearchMigratableResources", + "file": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_DeleteMetadataStore_sync", + "regionTag": "aiplatform_v1beta1_generated_MigrationService_SearchMigratableResources_async", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -24007,42 +28821,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_delete_metadata_store_sync.py" + "title": 
"aiplatform_v1beta1_generated_migration_service_search_migratable_resources_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient", + "shortName": "MigrationServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_artifact", + "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient.search_migratable_resources", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact", + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", + "shortName": "MigrationService" }, - "shortName": "GetArtifact" + "shortName": "SearchMigratableResources" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetArtifactRequest" + "type": "google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -24058,22 +28871,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", - "shortName": "get_artifact" + "resultType": "google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesPager", + "shortName": "search_migratable_resources" }, - "description": "Sample for GetArtifact", - "file": "aiplatform_v1beta1_generated_metadata_service_get_artifact_async.py", + "description": "Sample for SearchMigratableResources", + "file": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_MetadataService_GetArtifact_async", + "regionTag": "aiplatform_v1beta1_generated_MigrationService_SearchMigratableResources_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -24093,33 +28906,34 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_get_artifact_async.py" + "title": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceAsyncClient", + "shortName": "ModelGardenServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_artifact", + "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceAsyncClient.get_publisher_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetArtifact", + "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService.GetPublisherModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService", + "shortName": "ModelGardenService" }, - "shortName": "GetArtifact" + "shortName": "GetPublisherModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetArtifactRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetPublisherModelRequest" }, { "name": "name", @@ -24138,14 +28952,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", - "shortName": "get_artifact" + "resultType": "google.cloud.aiplatform_v1beta1.types.PublisherModel", + 
"shortName": "get_publisher_model" }, - "description": "Sample for GetArtifact", - "file": "aiplatform_v1beta1_generated_metadata_service_get_artifact_sync.py", + "description": "Sample for GetPublisherModel", + "file": "aiplatform_v1beta1_generated_model_garden_service_get_publisher_model_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetArtifact_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelGardenService_GetPublisherModel_async", "segments": [ { "end": 51, @@ -24178,29 +28992,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_get_artifact_sync.py" + "title": "aiplatform_v1beta1_generated_model_garden_service_get_publisher_model_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceClient", + "shortName": "ModelGardenServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_context", + "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceClient.get_publisher_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetContext", + "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService.GetPublisherModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService", + "shortName": "ModelGardenService" }, - "shortName": "GetContext" + "shortName": "GetPublisherModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetContextRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetPublisherModelRequest" }, { "name": "name", @@ -24219,14 +29032,14 @@ "type": "Sequence[Tuple[str, str]" } ], - 
"resultType": "google.cloud.aiplatform_v1beta1.types.Context", - "shortName": "get_context" + "resultType": "google.cloud.aiplatform_v1beta1.types.PublisherModel", + "shortName": "get_publisher_model" }, - "description": "Sample for GetContext", - "file": "aiplatform_v1beta1_generated_metadata_service_get_context_async.py", + "description": "Sample for GetPublisherModel", + "file": "aiplatform_v1beta1_generated_model_garden_service_get_publisher_model_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetContext_async", + "regionTag": "aiplatform_v1beta1_generated_ModelGardenService_GetPublisherModel_sync", "segments": [ { "end": 51, @@ -24259,31 +29072,32 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_get_context_async.py" + "title": "aiplatform_v1beta1_generated_model_garden_service_get_publisher_model_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceAsyncClient", + "shortName": "ModelGardenServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_context", + "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceAsyncClient.list_publisher_models", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetContext", + "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService.ListPublisherModels", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService", + "shortName": "ModelGardenService" }, - "shortName": "GetContext" + "shortName": "ListPublisherModels" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.GetContextRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListPublisherModelsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -24299,22 +29113,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Context", - "shortName": "get_context" + "resultType": "google.cloud.aiplatform_v1beta1.services.model_garden_service.pagers.ListPublisherModelsAsyncPager", + "shortName": "list_publisher_models" }, - "description": "Sample for GetContext", - "file": "aiplatform_v1beta1_generated_metadata_service_get_context_sync.py", + "description": "Sample for ListPublisherModels", + "file": "aiplatform_v1beta1_generated_model_garden_service_list_publisher_models_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetContext_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelGardenService_ListPublisherModels_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -24334,37 +29148,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_get_context_sync.py" + "title": "aiplatform_v1beta1_generated_model_garden_service_list_publisher_models_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceClient", + "shortName": "ModelGardenServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_execution", + "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceClient.list_publisher_models", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.MetadataService.GetExecution", + "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService.ListPublisherModels", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService", + "shortName": "ModelGardenService" }, - "shortName": "GetExecution" + "shortName": "ListPublisherModels" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetExecutionRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListPublisherModelsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -24380,22 +29193,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", - "shortName": "get_execution" + "resultType": "google.cloud.aiplatform_v1beta1.services.model_garden_service.pagers.ListPublisherModelsPager", + "shortName": "list_publisher_models" }, - "description": "Sample for GetExecution", - "file": "aiplatform_v1beta1_generated_metadata_service_get_execution_async.py", + "description": "Sample for ListPublisherModels", + "file": "aiplatform_v1beta1_generated_model_garden_service_list_publisher_models_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetExecution_async", + "regionTag": "aiplatform_v1beta1_generated_ModelGardenService_ListPublisherModels_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -24415,38 +29228,43 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_get_execution_async.py" + "title": "aiplatform_v1beta1_generated_model_garden_service_list_publisher_models_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, 
"client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_execution", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.batch_import_evaluated_annotations", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetExecution", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.BatchImportEvaluatedAnnotations", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "GetExecution" + "shortName": "BatchImportEvaluatedAnnotations" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetExecutionRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchImportEvaluatedAnnotationsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "evaluated_annotations", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.EvaluatedAnnotation]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -24460,14 +29278,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", - "shortName": "get_execution" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchImportEvaluatedAnnotationsResponse", + "shortName": "batch_import_evaluated_annotations" }, - "description": "Sample for GetExecution", - "file": "aiplatform_v1beta1_generated_metadata_service_get_execution_sync.py", + "description": "Sample for BatchImportEvaluatedAnnotations", + "file": "aiplatform_v1beta1_generated_model_service_batch_import_evaluated_annotations_async.py", "language": "PYTHON", "origin": 
"API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetExecution_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_BatchImportEvaluatedAnnotations_async", "segments": [ { "end": 51, @@ -24500,34 +29318,37 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_get_execution_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_batch_import_evaluated_annotations_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_metadata_schema", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.batch_import_evaluated_annotations", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.BatchImportEvaluatedAnnotations", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "GetMetadataSchema" + "shortName": "BatchImportEvaluatedAnnotations" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchImportEvaluatedAnnotationsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "evaluated_annotations", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.EvaluatedAnnotation]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -24541,14 +29362,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1beta1.types.MetadataSchema", - "shortName": "get_metadata_schema" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchImportEvaluatedAnnotationsResponse", + "shortName": "batch_import_evaluated_annotations" }, - "description": "Sample for GetMetadataSchema", - "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_async.py", + "description": "Sample for BatchImportEvaluatedAnnotations", + "file": "aiplatform_v1beta1_generated_model_service_batch_import_evaluated_annotations_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataSchema_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_BatchImportEvaluatedAnnotations_sync", "segments": [ { "end": 51, @@ -24581,33 +29402,38 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_async.py" + "title": "aiplatform_v1beta1_generated_model_service_batch_import_evaluated_annotations_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_metadata_schema", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.batch_import_model_evaluation_slices", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataSchema", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.BatchImportModelEvaluationSlices", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "GetMetadataSchema" + "shortName": 
"BatchImportModelEvaluationSlices" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataSchemaRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchImportModelEvaluationSlicesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "model_evaluation_slices", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -24621,14 +29447,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataSchema", - "shortName": "get_metadata_schema" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchImportModelEvaluationSlicesResponse", + "shortName": "batch_import_model_evaluation_slices" }, - "description": "Sample for GetMetadataSchema", - "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_sync.py", + "description": "Sample for BatchImportModelEvaluationSlices", + "file": "aiplatform_v1beta1_generated_model_service_batch_import_model_evaluation_slices_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataSchema_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_BatchImportModelEvaluationSlices_async", "segments": [ { "end": 51, @@ -24661,34 +29487,37 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_schema_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_batch_import_model_evaluation_slices_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.get_metadata_store", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.batch_import_model_evaluation_slices", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.BatchImportModelEvaluationSlices", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "GetMetadataStore" + "shortName": "BatchImportModelEvaluationSlices" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchImportModelEvaluationSlicesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "model_evaluation_slices", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -24702,14 +29531,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataStore", - "shortName": "get_metadata_store" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchImportModelEvaluationSlicesResponse", + "shortName": "batch_import_model_evaluation_slices" }, - "description": "Sample for GetMetadataStore", - "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_async.py", + "description": "Sample for BatchImportModelEvaluationSlices", + "file": "aiplatform_v1beta1_generated_model_service_batch_import_model_evaluation_slices_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataStore_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_BatchImportModelEvaluationSlices_sync", "segments": [ { 
"end": 51, @@ -24742,31 +29571,36 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_async.py" + "title": "aiplatform_v1beta1_generated_model_service_batch_import_model_evaluation_slices_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.get_metadata_store", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.copy_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.GetMetadataStore", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.CopyModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "GetMetadataStore" + "shortName": "CopyModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetMetadataStoreRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CopyModelRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "source_model", "type": "str" }, { @@ -24782,22 +29616,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.MetadataStore", - "shortName": "get_metadata_store" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "copy_model" }, - "description": "Sample for GetMetadataStore", - "file": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_sync.py", + "description": "Sample for CopyModel", + "file": "aiplatform_v1beta1_generated_model_service_copy_model_async.py", "language": "PYTHON", 
"origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_GetMetadataStore_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_CopyModel_async", "segments": [ { - "end": 51, + "end": 57, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 57, "start": 27, "type": "SHORT" }, @@ -24807,49 +29641,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 47, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 54, + "start": 48, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 58, + "start": 55, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_get_metadata_store_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_copy_model_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_artifacts", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.copy_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.CopyModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "ListArtifacts" + "shortName": "CopyModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CopyModelRequest" }, { "name": "parent", "type": "str" }, + { + "name": "source_model", + "type": "str" + }, { "name": "retry", "type": 
"google.api_core.retry.Retry" @@ -24863,22 +29700,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsAsyncPager", - "shortName": "list_artifacts" + "resultType": "google.api_core.operation.Operation", + "shortName": "copy_model" }, - "description": "Sample for ListArtifacts", - "file": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_async.py", + "description": "Sample for CopyModel", + "file": "aiplatform_v1beta1_generated_model_service_copy_model_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListArtifacts_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_CopyModel_sync", "segments": [ { - "end": 52, + "end": 57, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 57, "start": 27, "type": "SHORT" }, @@ -24888,46 +29725,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 47, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 54, + "start": 48, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 58, + "start": 55, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_async.py" + "title": "aiplatform_v1beta1_generated_model_service_copy_model_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_artifacts", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.delete_model_version", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListArtifacts", + "fullName": 
"google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "ListArtifacts" + "shortName": "DeleteModelVersion" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListArtifactsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelVersionRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -24943,22 +29781,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListArtifactsPager", - "shortName": "list_artifacts" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model_version" }, - "description": "Sample for ListArtifacts", - "file": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_sync.py", + "description": "Sample for DeleteModelVersion", + "file": "aiplatform_v1beta1_generated_model_service_delete_model_version_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListArtifacts_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -24973,42 +29811,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_list_artifacts_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_delete_model_version_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_contexts", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.delete_model_version", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListContexts", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "ListContexts" + "shortName": "DeleteModelVersion" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListContextsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelVersionRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25024,22 +29861,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsAsyncPager", - "shortName": "list_contexts" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model_version" }, - "description": "Sample for ListContexts", - "file": "aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py", + "description": "Sample for DeleteModelVersion", + "file": "aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListContexts_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ 
-25054,41 +29891,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_list_contexts_async.py" + "title": "aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_contexts", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.delete_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListContexts", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "ListContexts" + "shortName": "DeleteModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListContextsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25104,22 +29942,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListContextsPager", - "shortName": "list_contexts" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_model" }, - "description": "Sample for ListContexts", - "file": "aiplatform_v1beta1_generated_metadata_service_list_contexts_sync.py", + "description": "Sample for DeleteModel", + 
"file": "aiplatform_v1beta1_generated_model_service_delete_model_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListContexts_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModel_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -25134,42 +29972,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_list_contexts_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_delete_model_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_executions", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.delete_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "ListExecutions" + "shortName": "DeleteModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25185,22 +30022,22 @@ "type": 
"Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsAsyncPager", - "shortName": "list_executions" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_model" }, - "description": "Sample for ListExecutions", - "file": "aiplatform_v1beta1_generated_metadata_service_list_executions_async.py", + "description": "Sample for DeleteModel", + "file": "aiplatform_v1beta1_generated_model_service_delete_model_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListExecutions_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModel_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -25215,43 +30052,48 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_list_executions_async.py" + "title": "aiplatform_v1beta1_generated_model_service_delete_model_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_executions", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.export_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListExecutions", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ExportModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + 
"fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "ListExecutions" + "shortName": "ExportModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListExecutionsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, + { + "name": "output_config", + "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -25265,22 +30107,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListExecutionsPager", - "shortName": "list_executions" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "export_model" }, - "description": "Sample for ListExecutions", - "file": "aiplatform_v1beta1_generated_metadata_service_list_executions_sync.py", + "description": "Sample for ExportModel", + "file": "aiplatform_v1beta1_generated_model_service_export_model_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListExecutions_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ExportModel_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -25295,44 +30137,47 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_list_executions_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_export_model_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", 
- "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_metadata_schemas", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.export_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ExportModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "ListMetadataSchemas" + "shortName": "ExportModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, + { + "name": "output_config", + "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -25346,22 +30191,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasAsyncPager", - "shortName": "list_metadata_schemas" + "resultType": "google.api_core.operation.Operation", + "shortName": "export_model" }, - "description": "Sample for ListMetadataSchemas", - "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_async.py", + "description": "Sample for ExportModel", + "file": "aiplatform_v1beta1_generated_model_service_export_model_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataSchemas_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ExportModel_sync", "segments": [ { - 
"end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -25376,41 +30221,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_async.py" + "title": "aiplatform_v1beta1_generated_model_service_export_model_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_metadata_schemas", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.get_model_evaluation_slice", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataSchemas", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "ListMetadataSchemas" + "shortName": "GetModelEvaluationSlice" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataSchemasRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25426,22 +30272,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataSchemasPager", - "shortName": "list_metadata_schemas" + "resultType": 
"google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice", + "shortName": "get_model_evaluation_slice" }, - "description": "Sample for ListMetadataSchemas", - "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_sync.py", + "description": "Sample for GetModelEvaluationSlice", + "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataSchemas_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25461,37 +30307,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_schemas_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.list_metadata_stores", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.get_model_evaluation_slice", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "ListMetadataStores" + "shortName": 
"GetModelEvaluationSlice" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25507,22 +30352,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresAsyncPager", - "shortName": "list_metadata_stores" + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice", + "shortName": "get_model_evaluation_slice" }, - "description": "Sample for ListMetadataStores", - "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py", + "description": "Sample for GetModelEvaluationSlice", + "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataStores_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25542,36 +30387,37 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_async.py" + "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.list_metadata_stores", + "fullName": 
"google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.get_model_evaluation", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.ListMetadataStores", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "ListMetadataStores" + "shortName": "GetModelEvaluation" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListMetadataStoresRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25587,22 +30433,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.metadata_service.pagers.ListMetadataStoresPager", - "shortName": "list_metadata_stores" + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", + "shortName": "get_model_evaluation" }, - "description": "Sample for ListMetadataStores", - "file": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_sync.py", + "description": "Sample for GetModelEvaluation", + "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_ListMetadataStores_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluation_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25622,37 +30468,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_list_metadata_stores_sync.py" + "title": 
"aiplatform_v1beta1_generated_model_service_get_model_evaluation_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.purge_artifacts", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.get_model_evaluation", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "PurgeArtifacts" + "shortName": "GetModelEvaluation" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25668,22 +30513,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "purge_artifacts" + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", + "shortName": "get_model_evaluation" }, - "description": "Sample for PurgeArtifacts", - "file": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_async.py", + "description": "Sample for GetModelEvaluation", + "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeArtifacts_async", + "regionTag": 
"aiplatform_v1beta1_generated_ModelService_GetModelEvaluation_sync", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25693,46 +30538,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_async.py" + "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.purge_artifacts", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.get_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeArtifacts", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "PurgeArtifacts" + "shortName": "GetModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.PurgeArtifactsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetModelRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25748,22 +30594,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "purge_artifacts" + "resultType": 
"google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "get_model" }, - "description": "Sample for PurgeArtifacts", - "file": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_sync.py", + "description": "Sample for GetModel", + "file": "aiplatform_v1beta1_generated_model_service_get_model_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeArtifacts_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModel_async", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25773,47 +30619,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_purge_artifacts_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_get_model_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.purge_contexts", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.get_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": 
"PurgeContexts" + "shortName": "GetModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetModelRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -25829,22 +30674,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "purge_contexts" + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "get_model" }, - "description": "Sample for PurgeContexts", - "file": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_async.py", + "description": "Sample for GetModel", + "file": "aiplatform_v1beta1_generated_model_service_get_model_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeContexts_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModel_sync", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25854,48 +30699,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_async.py" + "title": "aiplatform_v1beta1_generated_model_service_get_model_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.purge_contexts", + "fullName": 
"google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.import_model_evaluation", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeContexts", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "PurgeContexts" + "shortName": "ImportModelEvaluation" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.PurgeContextsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ImportModelEvaluationRequest" }, { "name": "parent", "type": "str" }, + { + "name": "model_evaluation", + "type": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -25909,22 +30759,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "purge_contexts" + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", + "shortName": "import_model_evaluation" }, - "description": "Sample for PurgeContexts", - "file": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_sync.py", + "description": "Sample for ImportModelEvaluation", + "file": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeContexts_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_async", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -25934,49 +30784,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, 
+ "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_purge_contexts_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.purge_executions", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.import_model_evaluation", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "PurgeExecutions" + "shortName": "ImportModelEvaluation" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ImportModelEvaluationRequest" }, { "name": "parent", "type": "str" }, + { + "name": "model_evaluation", + "type": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -25990,22 +30843,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "purge_executions" + "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", + "shortName": "import_model_evaluation" }, - "description": "Sample for PurgeExecutions", - "file": 
"aiplatform_v1beta1_generated_metadata_service_purge_executions_async.py", + "description": "Sample for ImportModelEvaluation", + "file": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeExecutions_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_sync", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -26015,43 +30868,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_purge_executions_async.py" + "title": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.purge_executions", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_model_evaluation_slices", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.PurgeExecutions", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "PurgeExecutions" + "shortName": 
"ListModelEvaluationSlices" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.PurgeExecutionsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest" }, { "name": "parent", @@ -26070,22 +30924,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "purge_executions" + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager", + "shortName": "list_model_evaluation_slices" }, - "description": "Sample for PurgeExecutions", - "file": "aiplatform_v1beta1_generated_metadata_service_purge_executions_sync.py", + "description": "Sample for ListModelEvaluationSlices", + "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_PurgeExecutions_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_async", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26095,47 +30949,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_purge_executions_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" 
}, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.query_artifact_lineage_subgraph", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_model_evaluation_slices", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "QueryArtifactLineageSubgraph" + "shortName": "ListModelEvaluationSlices" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest" }, { - "name": "artifact", + "name": "parent", "type": "str" }, { @@ -26151,22 +31004,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", - "shortName": "query_artifact_lineage_subgraph" + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesPager", + "shortName": "list_model_evaluation_slices" }, - "description": "Sample for QueryArtifactLineageSubgraph", - "file": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_async.py", + "description": "Sample for ListModelEvaluationSlices", + "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryArtifactLineageSubgraph_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" 
}, @@ -26186,36 +31039,37 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_async.py" + "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.query_artifact_lineage_subgraph", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_model_evaluations", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryArtifactLineageSubgraph", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "QueryArtifactLineageSubgraph" + "shortName": "ListModelEvaluations" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.QueryArtifactLineageSubgraphRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest" }, { - "name": "artifact", + "name": "parent", "type": "str" }, { @@ -26231,22 +31085,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", - "shortName": "query_artifact_lineage_subgraph" + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsAsyncPager", + "shortName": "list_model_evaluations" }, - "description": "Sample for QueryArtifactLineageSubgraph", - "file": 
"aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py", + "description": "Sample for ListModelEvaluations", + "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryArtifactLineageSubgraph_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26266,37 +31120,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_query_artifact_lineage_subgraph_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.query_context_lineage_subgraph", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_model_evaluations", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "QueryContextLineageSubgraph" + "shortName": "ListModelEvaluations" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest" }, { - "name": "context", + "name": "parent", "type": "str" }, { @@ -26312,22 +31165,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", - "shortName": "query_context_lineage_subgraph" + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsPager", + "shortName": "list_model_evaluations" }, - "description": "Sample for QueryContextLineageSubgraph", - "file": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_async.py", + "description": "Sample for ListModelEvaluations", + "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryContextLineageSubgraph_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26347,36 +31200,37 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_async.py" + "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.query_context_lineage_subgraph", + "fullName": 
"google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_model_versions", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryContextLineageSubgraph", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "QueryContextLineageSubgraph" + "shortName": "ListModelVersions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.QueryContextLineageSubgraphRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest" }, { - "name": "context", + "name": "name", "type": "str" }, { @@ -26392,22 +31246,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", - "shortName": "query_context_lineage_subgraph" + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelVersionsAsyncPager", + "shortName": "list_model_versions" }, - "description": "Sample for QueryContextLineageSubgraph", - "file": "aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_sync.py", + "description": "Sample for ListModelVersions", + "file": "aiplatform_v1beta1_generated_model_service_list_model_versions_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryContextLineageSubgraph_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelVersions_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26427,37 +31281,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_metadata_service_query_context_lineage_subgraph_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_list_model_versions_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.query_execution_inputs_and_outputs", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_model_versions", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "QueryExecutionInputsAndOutputs" + "shortName": "ListModelVersions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest" }, { - "name": "execution", + "name": "name", "type": "str" }, { @@ -26473,22 +31326,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", - "shortName": "query_execution_inputs_and_outputs" + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelVersionsPager", + "shortName": "list_model_versions" }, - "description": "Sample for QueryExecutionInputsAndOutputs", - "file": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_async.py", + "description": "Sample for ListModelVersions", + "file": 
"aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryExecutionInputsAndOutputs_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelVersions_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26508,36 +31361,37 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_async.py" + "title": "aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.query_execution_inputs_and_outputs", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_models", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.QueryExecutionInputsAndOutputs", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModels", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "QueryExecutionInputsAndOutputs" + "shortName": "ListModels" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.QueryExecutionInputsAndOutputsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListModelsRequest" }, { - "name": "execution", + "name": "parent", "type": "str" }, { @@ -26553,22 +31407,22 @@ 
"type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.LineageSubgraph", - "shortName": "query_execution_inputs_and_outputs" + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsAsyncPager", + "shortName": "list_models" }, - "description": "Sample for QueryExecutionInputsAndOutputs", - "file": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py", + "description": "Sample for ListModels", + "file": "aiplatform_v1beta1_generated_model_service_list_models_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_QueryExecutionInputsAndOutputs_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModels_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26588,43 +31442,38 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_query_execution_inputs_and_outputs_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_list_models_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.remove_context_children", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_models", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.RemoveContextChildren", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModels", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": 
"MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "RemoveContextChildren" + "shortName": "ListModels" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.RemoveContextChildrenRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListModelsRequest" }, { - "name": "context", + "name": "parent", "type": "str" }, - { - "name": "child_contexts", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -26638,22 +31487,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.RemoveContextChildrenResponse", - "shortName": "remove_context_children" + "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsPager", + "shortName": "list_models" }, - "description": "Sample for RemoveContextChildren", - "file": "aiplatform_v1beta1_generated_metadata_service_remove_context_children_async.py", + "description": "Sample for ListModels", + "file": "aiplatform_v1beta1_generated_model_service_list_models_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_RemoveContextChildren_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModels_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26673,40 +31522,41 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_remove_context_children_async.py" + "title": "aiplatform_v1beta1_generated_model_service_list_models_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.remove_context_children", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.merge_version_aliases", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.RemoveContextChildren", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "RemoveContextChildren" + "shortName": "MergeVersionAliases" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.RemoveContextChildrenRequest" + "type": "google.cloud.aiplatform_v1beta1.types.MergeVersionAliasesRequest" }, { - "name": "context", + "name": "name", "type": "str" }, { - "name": "child_contexts", + "name": "version_aliases", "type": "MutableSequence[str]" }, { @@ -26722,22 +31572,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.RemoveContextChildrenResponse", - "shortName": "remove_context_children" + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "merge_version_aliases" }, - "description": "Sample for RemoveContextChildren", - "file": "aiplatform_v1beta1_generated_metadata_service_remove_context_children_sync.py", + "description": "Sample for MergeVersionAliases", + "file": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_RemoveContextChildren_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 
51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26747,52 +31597,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 53, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_remove_context_children_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.update_artifact", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.merge_version_aliases", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "UpdateArtifact" + "shortName": "MergeVersionAliases" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest" + "type": "google.cloud.aiplatform_v1beta1.types.MergeVersionAliasesRequest" }, { - "name": "artifact", - "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + "name": "name", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "version_aliases", + "type": "MutableSequence[str]" }, { "name": "retry", @@ -26807,22 +31656,22 @@ "type": 
"Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", - "shortName": "update_artifact" + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "merge_version_aliases" }, - "description": "Sample for UpdateArtifact", - "file": "aiplatform_v1beta1_generated_metadata_service_update_artifact_async.py", + "description": "Sample for MergeVersionAliases", + "file": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateArtifact_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_sync", "segments": [ { - "end": 50, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 52, "start": 27, "type": "SHORT" }, @@ -26832,51 +31681,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 53, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_update_artifact_async.py" + "title": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.update_artifact", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.update_explanation_dataset", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateArtifact", + "fullName": 
"google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "UpdateArtifact" + "shortName": "UpdateExplanationDataset" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateArtifactRequest" - }, - { - "name": "artifact", - "type": "google.cloud.aiplatform_v1beta1.types.Artifact" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateExplanationDatasetRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "model", + "type": "str" }, { "name": "retry", @@ -26891,22 +31737,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Artifact", - "shortName": "update_artifact" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_explanation_dataset" }, - "description": "Sample for UpdateArtifact", - "file": "aiplatform_v1beta1_generated_metadata_service_update_artifact_sync.py", + "description": "Sample for UpdateExplanationDataset", + "file": "aiplatform_v1beta1_generated_model_service_update_explanation_dataset_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateArtifact_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateExplanationDataset_async", "segments": [ { - "end": 50, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 55, "start": 27, "type": "SHORT" }, @@ -26916,52 +31762,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 56, + "start": 53, "type": 
"RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_update_artifact_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_update_explanation_dataset_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.update_context", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.update_explanation_dataset", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "UpdateContext" + "shortName": "UpdateExplanationDataset" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateContextRequest" - }, - { - "name": "context", - "type": "google.cloud.aiplatform_v1beta1.types.Context" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateExplanationDatasetRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "model", + "type": "str" }, { "name": "retry", @@ -26976,22 +31817,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Context", - "shortName": "update_context" + "resultType": "google.api_core.operation.Operation", + "shortName": "update_explanation_dataset" }, - "description": "Sample for UpdateContext", - "file": "aiplatform_v1beta1_generated_metadata_service_update_context_async.py", + "description": "Sample for 
UpdateExplanationDataset", + "file": "aiplatform_v1beta1_generated_model_service_update_explanation_dataset_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateContext_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateExplanationDataset_sync", "segments": [ { - "end": 50, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 55, "start": 27, "type": "SHORT" }, @@ -27001,47 +31842,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_update_context_async.py" + "title": "aiplatform_v1beta1_generated_model_service_update_explanation_dataset_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.update_context", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.update_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateContext", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "UpdateContext" + "shortName": "UpdateModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateContextRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.UpdateModelRequest" }, { - "name": "context", - "type": "google.cloud.aiplatform_v1beta1.types.Context" + "name": "model", + "type": "google.cloud.aiplatform_v1beta1.types.Model" }, { "name": "update_mask", @@ -27060,22 +31902,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Context", - "shortName": "update_context" + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "update_model" }, - "description": "Sample for UpdateContext", - "file": "aiplatform_v1beta1_generated_metadata_service_update_context_sync.py", + "description": "Sample for UpdateModel", + "file": "aiplatform_v1beta1_generated_model_service_update_model_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateContext_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateModel_async", "segments": [ { - "end": 50, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 54, "start": 27, "type": "SHORT" }, @@ -27085,48 +31927,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 51, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_update_context_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_update_model_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient", - "shortName": "MetadataServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceAsyncClient.update_execution", + "fullName": 
"google.cloud.aiplatform_v1beta1.ModelServiceClient.update_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "UpdateExecution" + "shortName": "UpdateModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelRequest" }, { - "name": "execution", - "type": "google.cloud.aiplatform_v1beta1.types.Execution" + "name": "model", + "type": "google.cloud.aiplatform_v1beta1.types.Model" }, { "name": "update_mask", @@ -27145,22 +31986,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", - "shortName": "update_execution" + "resultType": "google.cloud.aiplatform_v1beta1.types.Model", + "shortName": "update_model" }, - "description": "Sample for UpdateExecution", - "file": "aiplatform_v1beta1_generated_metadata_service_update_execution_async.py", + "description": "Sample for UpdateModel", + "file": "aiplatform_v1beta1_generated_model_service_update_model_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateExecution_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateModel_sync", "segments": [ { - "end": 50, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 54, "start": 27, "type": "SHORT" }, @@ -27170,51 +32011,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 51, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + 
"end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_update_execution_async.py" + "title": "aiplatform_v1beta1_generated_model_service_update_model_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient", - "shortName": "MetadataServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", + "shortName": "ModelServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MetadataServiceClient.update_execution", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.upload_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService.UpdateExecution", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UploadModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MetadataService", - "shortName": "MetadataService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "UpdateExecution" + "shortName": "UploadModel" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateExecutionRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UploadModelRequest" }, { - "name": "execution", - "type": "google.cloud.aiplatform_v1beta1.types.Execution" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "model", + "type": "google.cloud.aiplatform_v1beta1.types.Model" }, { "name": "retry", @@ -27229,22 +32071,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Execution", - "shortName": "update_execution" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "upload_model" }, - "description": "Sample for UpdateExecution", - "file": 
"aiplatform_v1beta1_generated_metadata_service_update_execution_sync.py", + "description": "Sample for UploadModel", + "file": "aiplatform_v1beta1_generated_model_service_upload_model_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MetadataService_UpdateExecution_sync", + "regionTag": "aiplatform_v1beta1_generated_ModelService_UploadModel_async", "segments": [ { - "end": 50, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 59, "start": 27, "type": "SHORT" }, @@ -27254,52 +32096,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_metadata_service_update_execution_sync.py" + "title": "aiplatform_v1beta1_generated_model_service_upload_model_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient", - "shortName": "MigrationServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", + "shortName": "ModelServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient.batch_migrate_resources", + "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.upload_model", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources", + "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UploadModel", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", - "shortName": "MigrationService" + "fullName": "google.cloud.aiplatform.v1beta1.ModelService", + "shortName": "ModelService" }, - "shortName": "BatchMigrateResources" + "shortName": "UploadModel" }, "parameters": [ { "name": "request", - 
"type": "google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UploadModelRequest" }, { "name": "parent", "type": "str" }, { - "name": "migrate_resource_requests", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]" + "name": "model", + "type": "google.cloud.aiplatform_v1beta1.types.Model" }, { "name": "retry", @@ -27314,22 +32155,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "batch_migrate_resources" + "resultType": "google.api_core.operation.Operation", + "shortName": "upload_model" }, - "description": "Sample for BatchMigrateResources", - "file": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_async.py", + "description": "Sample for UploadModel", + "file": "aiplatform_v1beta1_generated_model_service_upload_model_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MigrationService_BatchMigrateResources_async", + "regionTag": "aiplatform_v1beta1_generated_ModelService_UploadModel_sync", "segments": [ { - "end": 61, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 61, + "end": 59, "start": 27, "type": "SHORT" }, @@ -27339,51 +32180,60 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 52, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 62, - "start": 59, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_async.py" + "title": "aiplatform_v1beta1_generated_model_service_upload_model_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient", - "shortName": "MigrationServiceClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient.batch_migrate_resources", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient.assign_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.AssignNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", - "shortName": "MigrationService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "BatchMigrateResources" + "shortName": "AssignNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchMigrateResourcesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.AssignNotebookRuntimeRequest" }, { "name": "parent", "type": "str" }, { - "name": "migrate_resource_requests", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.MigrateResourceRequest]" + "name": "notebook_runtime_template", + "type": "str" + }, + { + "name": "notebook_runtime", + "type": "google.cloud.aiplatform_v1beta1.types.NotebookRuntime" + }, + { + "name": "notebook_runtime_id", + "type": "str" }, { "name": "retry", @@ -27398,14 +32248,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "batch_migrate_resources" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "assign_notebook_runtime" }, - "description": "Sample for BatchMigrateResources", - "file": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_sync.py", + "description": "Sample for AssignNotebookRuntime", + "file": "aiplatform_v1beta1_generated_notebook_service_assign_notebook_runtime_async.py", "language": "PYTHON", "origin": 
"API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MigrationService_BatchMigrateResources_sync", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_AssignNotebookRuntime_async", "segments": [ { "end": 61, @@ -27438,34 +32288,45 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_migration_service_batch_migrate_resources_sync.py" + "title": "aiplatform_v1beta1_generated_notebook_service_assign_notebook_runtime_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient", - "shortName": "MigrationServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceAsyncClient.search_migratable_resources", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient.assign_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.AssignNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", - "shortName": "MigrationService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "SearchMigratableResources" + "shortName": "AssignNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.AssignNotebookRuntimeRequest" }, { "name": "parent", "type": "str" }, + { + "name": "notebook_runtime_template", + "type": "str" + }, + { + "name": "notebook_runtime", + "type": "google.cloud.aiplatform_v1beta1.types.NotebookRuntime" + }, + { + "name": "notebook_runtime_id", + "type": "str" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ 
-27479,22 +32340,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesAsyncPager", - "shortName": "search_migratable_resources" + "resultType": "google.api_core.operation.Operation", + "shortName": "assign_notebook_runtime" }, - "description": "Sample for SearchMigratableResources", - "file": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_async.py", + "description": "Sample for AssignNotebookRuntime", + "file": "aiplatform_v1beta1_generated_notebook_service_assign_notebook_runtime_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MigrationService_SearchMigratableResources_async", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_AssignNotebookRuntime_sync", "segments": [ { - "end": 52, + "end": 61, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 61, "start": 27, "type": "SHORT" }, @@ -27504,48 +32365,57 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 62, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_async.py" + "title": "aiplatform_v1beta1_generated_notebook_service_assign_notebook_runtime_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient", - "shortName": "MigrationServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.MigrationServiceClient.search_migratable_resources", + "fullName": 
"google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient.create_notebook_runtime_template", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.CreateNotebookRuntimeTemplate", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.MigrationService", - "shortName": "MigrationService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "SearchMigratableResources" + "shortName": "CreateNotebookRuntimeTemplate" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.SearchMigratableResourcesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateNotebookRuntimeTemplateRequest" }, { "name": "parent", "type": "str" }, + { + "name": "notebook_runtime_template", + "type": "google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplate" + }, + { + "name": "notebook_runtime_template_id", + "type": "str" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -27559,22 +32429,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.migration_service.pagers.SearchMigratableResourcesPager", - "shortName": "search_migratable_resources" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_notebook_runtime_template" }, - "description": "Sample for SearchMigratableResources", - "file": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_sync.py", + "description": "Sample for CreateNotebookRuntimeTemplate", + "file": "aiplatform_v1beta1_generated_notebook_service_create_notebook_runtime_template_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_MigrationService_SearchMigratableResources_sync", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_CreateNotebookRuntimeTemplate_async", 
"segments": [ { - "end": 52, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 59, "start": 27, "type": "SHORT" }, @@ -27584,47 +32454,54 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_migration_service_search_migratable_resources_sync.py" + "title": "aiplatform_v1beta1_generated_notebook_service_create_notebook_runtime_template_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceAsyncClient", - "shortName": "ModelGardenServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceAsyncClient.get_publisher_model", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient.create_notebook_runtime_template", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService.GetPublisherModel", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.CreateNotebookRuntimeTemplate", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService", - "shortName": "ModelGardenService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "GetPublisherModel" + "shortName": "CreateNotebookRuntimeTemplate" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetPublisherModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateNotebookRuntimeTemplateRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "notebook_runtime_template", + "type": 
"google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplate" + }, + { + "name": "notebook_runtime_template_id", "type": "str" }, { @@ -27640,22 +32517,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.PublisherModel", - "shortName": "get_publisher_model" + "resultType": "google.api_core.operation.Operation", + "shortName": "create_notebook_runtime_template" }, - "description": "Sample for GetPublisherModel", - "file": "aiplatform_v1beta1_generated_model_garden_service_get_publisher_model_async.py", + "description": "Sample for CreateNotebookRuntimeTemplate", + "file": "aiplatform_v1beta1_generated_notebook_service_create_notebook_runtime_template_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelGardenService_GetPublisherModel_async", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_CreateNotebookRuntimeTemplate_sync", "segments": [ { - "end": 51, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 59, "start": 27, "type": "SHORT" }, @@ -27665,43 +32542,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_garden_service_get_publisher_model_async.py" + "title": "aiplatform_v1beta1_generated_notebook_service_create_notebook_runtime_template_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceClient", - "shortName": "ModelGardenServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.ModelGardenServiceClient.get_publisher_model", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient.delete_notebook_runtime_template", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService.GetPublisherModel", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.DeleteNotebookRuntimeTemplate", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService", - "shortName": "ModelGardenService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "GetPublisherModel" + "shortName": "DeleteNotebookRuntimeTemplate" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetPublisherModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteNotebookRuntimeTemplateRequest" }, { "name": "name", @@ -27720,22 +32598,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.PublisherModel", - "shortName": "get_publisher_model" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_notebook_runtime_template" }, - "description": "Sample for GetPublisherModel", - "file": "aiplatform_v1beta1_generated_model_garden_service_get_publisher_model_sync.py", + "description": "Sample for DeleteNotebookRuntimeTemplate", + "file": "aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_template_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelGardenService_GetPublisherModel_sync", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_DeleteNotebookRuntimeTemplate_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -27750,42 +32628,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": 
"REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_garden_service_get_publisher_model_sync.py" + "title": "aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_template_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceAsyncClient", - "shortName": "ModelGardenServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceAsyncClient.list_publisher_models", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient.delete_notebook_runtime_template", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService.ListPublisherModels", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.DeleteNotebookRuntimeTemplate", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService", - "shortName": "ModelGardenService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "ListPublisherModels" + "shortName": "DeleteNotebookRuntimeTemplate" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListPublisherModelsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteNotebookRuntimeTemplateRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -27801,22 +32678,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.model_garden_service.pagers.ListPublisherModelsAsyncPager", - "shortName": "list_publisher_models" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_notebook_runtime_template" }, - "description": "Sample for ListPublisherModels", - "file": 
"aiplatform_v1beta1_generated_model_garden_service_list_publisher_models_async.py", + "description": "Sample for DeleteNotebookRuntimeTemplate", + "file": "aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_template_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelGardenService_ListPublisherModels_async", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_DeleteNotebookRuntimeTemplate_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -27831,41 +32708,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_garden_service_list_publisher_models_async.py" + "title": "aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_template_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceClient", - "shortName": "ModelGardenServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelGardenServiceClient.list_publisher_models", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient.delete_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService.ListPublisherModels", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.DeleteNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelGardenService", - "shortName": "ModelGardenService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "ListPublisherModels" + "shortName": 
"DeleteNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListPublisherModelsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteNotebookRuntimeRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -27881,22 +32759,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.model_garden_service.pagers.ListPublisherModelsPager", - "shortName": "list_publisher_models" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_notebook_runtime" }, - "description": "Sample for ListPublisherModels", - "file": "aiplatform_v1beta1_generated_model_garden_service_list_publisher_models_sync.py", + "description": "Sample for DeleteNotebookRuntime", + "file": "aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelGardenService_ListPublisherModels_sync", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_DeleteNotebookRuntime_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -27911,48 +32789,43 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_garden_service_list_publisher_models_sync.py" + "title": "aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.batch_import_evaluated_annotations", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient.delete_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.BatchImportEvaluatedAnnotations", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.DeleteNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "BatchImportEvaluatedAnnotations" + "shortName": "DeleteNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchImportEvaluatedAnnotationsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteNotebookRuntimeRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "evaluated_annotations", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.EvaluatedAnnotation]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -27966,22 +32839,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.BatchImportEvaluatedAnnotationsResponse", - "shortName": "batch_import_evaluated_annotations" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_notebook_runtime" }, - "description": "Sample for BatchImportEvaluatedAnnotations", - "file": "aiplatform_v1beta1_generated_model_service_batch_import_evaluated_annotations_async.py", + "description": "Sample for DeleteNotebookRuntime", + "file": "aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_BatchImportEvaluatedAnnotations_async", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_DeleteNotebookRuntime_sync", 
"segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -27996,47 +32869,44 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_batch_import_evaluated_annotations_async.py" + "title": "aiplatform_v1beta1_generated_notebook_service_delete_notebook_runtime_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.batch_import_evaluated_annotations", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient.get_notebook_runtime_template", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.BatchImportEvaluatedAnnotations", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.GetNotebookRuntimeTemplate", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "BatchImportEvaluatedAnnotations" + "shortName": "GetNotebookRuntimeTemplate" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchImportEvaluatedAnnotationsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetNotebookRuntimeTemplateRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "evaluated_annotations", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.EvaluatedAnnotation]" - }, { "name": "retry", "type": 
"google.api_core.retry.Retry" @@ -28050,14 +32920,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.BatchImportEvaluatedAnnotationsResponse", - "shortName": "batch_import_evaluated_annotations" + "resultType": "google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplate", + "shortName": "get_notebook_runtime_template" }, - "description": "Sample for BatchImportEvaluatedAnnotations", - "file": "aiplatform_v1beta1_generated_model_service_batch_import_evaluated_annotations_sync.py", + "description": "Sample for GetNotebookRuntimeTemplate", + "file": "aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_template_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_BatchImportEvaluatedAnnotations_sync", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_GetNotebookRuntimeTemplate_async", "segments": [ { "end": 51, @@ -28090,38 +32960,33 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_batch_import_evaluated_annotations_sync.py" + "title": "aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_template_async.py" }, { "canonical": true, "clientMethod": { - "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.batch_import_model_evaluation_slices", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient.get_notebook_runtime_template", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.BatchImportModelEvaluationSlices", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.GetNotebookRuntimeTemplate", "service": { - "fullName": 
"google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "BatchImportModelEvaluationSlices" + "shortName": "GetNotebookRuntimeTemplate" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchImportModelEvaluationSlicesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetNotebookRuntimeTemplateRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "model_evaluation_slices", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -28135,14 +33000,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.BatchImportModelEvaluationSlicesResponse", - "shortName": "batch_import_model_evaluation_slices" + "resultType": "google.cloud.aiplatform_v1beta1.types.NotebookRuntimeTemplate", + "shortName": "get_notebook_runtime_template" }, - "description": "Sample for BatchImportModelEvaluationSlices", - "file": "aiplatform_v1beta1_generated_model_service_batch_import_model_evaluation_slices_async.py", + "description": "Sample for GetNotebookRuntimeTemplate", + "file": "aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_template_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_BatchImportModelEvaluationSlices_async", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_GetNotebookRuntimeTemplate_sync", "segments": [ { "end": 51, @@ -28175,36 +33040,113 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_batch_import_model_evaluation_slices_async.py" + "title": "aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_template_sync.py" }, { "canonical": true, "clientMethod": { + "async": true, 
"client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.batch_import_model_evaluation_slices", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient.get_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.BatchImportModelEvaluationSlices", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.GetNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "BatchImportModelEvaluationSlices" + "shortName": "GetNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchImportModelEvaluationSlicesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetNotebookRuntimeRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { - "name": "model_evaluation_slices", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice]" + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.aiplatform_v1beta1.types.NotebookRuntime", + "shortName": "get_notebook_runtime" + }, + "description": "Sample for GetNotebookRuntime", + "file": "aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_GetNotebookRuntime_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, 
+ "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient", + "shortName": "NotebookServiceClient" + }, + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient.get_notebook_runtime", + "method": { + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.GetNotebookRuntime", + "service": { + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" + }, + "shortName": "GetNotebookRuntime" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.GetNotebookRuntimeRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -28219,14 +33161,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.BatchImportModelEvaluationSlicesResponse", - "shortName": "batch_import_model_evaluation_slices" + "resultType": "google.cloud.aiplatform_v1beta1.types.NotebookRuntime", + "shortName": "get_notebook_runtime" }, - "description": "Sample for BatchImportModelEvaluationSlices", - "file": "aiplatform_v1beta1_generated_model_service_batch_import_model_evaluation_slices_sync.py", + "description": "Sample for GetNotebookRuntime", + "file": "aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_BatchImportModelEvaluationSlices_sync", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_GetNotebookRuntime_sync", "segments": [ { "end": 
51, @@ -28259,38 +33201,34 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_batch_import_model_evaluation_slices_sync.py" + "title": "aiplatform_v1beta1_generated_notebook_service_get_notebook_runtime_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.copy_model", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient.list_notebook_runtime_templates", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.CopyModel", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimeTemplates", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "CopyModel" + "shortName": "ListNotebookRuntimeTemplates" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CopyModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimeTemplatesRequest" }, { "name": "parent", "type": "str" }, - { - "name": "source_model", - "type": "str" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -28304,22 +33242,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "copy_model" + "resultType": "google.cloud.aiplatform_v1beta1.services.notebook_service.pagers.ListNotebookRuntimeTemplatesAsyncPager", + "shortName": "list_notebook_runtime_templates" }, - "description": "Sample for CopyModel", - "file": "aiplatform_v1beta1_generated_model_service_copy_model_async.py", 
+ "description": "Sample for ListNotebookRuntimeTemplates", + "file": "aiplatform_v1beta1_generated_notebook_service_list_notebook_runtime_templates_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_CopyModel_async", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_ListNotebookRuntimeTemplates_async", "segments": [ { - "end": 57, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 52, "start": 27, "type": "SHORT" }, @@ -28329,52 +33267,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 47, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 48, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_copy_model_async.py" + "title": "aiplatform_v1beta1_generated_notebook_service_list_notebook_runtime_templates_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.copy_model", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient.list_notebook_runtime_templates", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.CopyModel", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimeTemplates", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "CopyModel" + "shortName": "ListNotebookRuntimeTemplates" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.CopyModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimeTemplatesRequest" }, { "name": "parent", "type": "str" }, - { - "name": "source_model", - "type": "str" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -28388,22 +33322,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "copy_model" + "resultType": "google.cloud.aiplatform_v1beta1.services.notebook_service.pagers.ListNotebookRuntimeTemplatesPager", + "shortName": "list_notebook_runtime_templates" }, - "description": "Sample for CopyModel", - "file": "aiplatform_v1beta1_generated_model_service_copy_model_sync.py", + "description": "Sample for ListNotebookRuntimeTemplates", + "file": "aiplatform_v1beta1_generated_notebook_service_list_notebook_runtime_templates_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_CopyModel_sync", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_ListNotebookRuntimeTemplates_sync", "segments": [ { - "end": 57, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 52, "start": 27, "type": "SHORT" }, @@ -28413,47 +33347,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 47, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 48, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_copy_model_sync.py" + "title": "aiplatform_v1beta1_generated_notebook_service_list_notebook_runtime_templates_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient", + 
"shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.delete_model_version", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient.list_notebook_runtimes", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimes", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "DeleteModelVersion" + "shortName": "ListNotebookRuntimes" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelVersionRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -28469,22 +33403,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_model_version" + "resultType": "google.cloud.aiplatform_v1beta1.services.notebook_service.pagers.ListNotebookRuntimesAsyncPager", + "shortName": "list_notebook_runtimes" }, - "description": "Sample for DeleteModelVersion", - "file": "aiplatform_v1beta1_generated_model_service_delete_model_version_async.py", + "description": "Sample for ListNotebookRuntimes", + "file": "aiplatform_v1beta1_generated_notebook_service_list_notebook_runtimes_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_async", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_ListNotebookRuntimes_async", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -28499,41 +33433,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + 
"end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_delete_model_version_async.py" + "title": "aiplatform_v1beta1_generated_notebook_service_list_notebook_runtimes_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.delete_model_version", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient.list_notebook_runtimes", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModelVersion", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.ListNotebookRuntimes", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "DeleteModelVersion" + "shortName": "ListNotebookRuntimes" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelVersionRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListNotebookRuntimesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -28549,22 +33483,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_model_version" + "resultType": "google.cloud.aiplatform_v1beta1.services.notebook_service.pagers.ListNotebookRuntimesPager", + "shortName": "list_notebook_runtimes" }, - "description": "Sample for DeleteModelVersion", - "file": "aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py", + "description": "Sample for ListNotebookRuntimes", + 
"file": "aiplatform_v1beta1_generated_notebook_service_list_notebook_runtimes_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModelVersion_sync", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_ListNotebookRuntimes_sync", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -28579,39 +33513,39 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_delete_model_version_sync.py" + "title": "aiplatform_v1beta1_generated_notebook_service_list_notebook_runtimes_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.delete_model", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient.start_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModel", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.StartNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "DeleteModel" + "shortName": "StartNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.StartNotebookRuntimeRequest" }, { "name": "name", @@ 
-28631,13 +33565,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_model" + "shortName": "start_notebook_runtime" }, - "description": "Sample for DeleteModel", - "file": "aiplatform_v1beta1_generated_model_service_delete_model_async.py", + "description": "Sample for StartNotebookRuntime", + "file": "aiplatform_v1beta1_generated_notebook_service_start_notebook_runtime_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModel_async", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_StartNotebookRuntime_async", "segments": [ { "end": 55, @@ -28670,28 +33604,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_delete_model_async.py" + "title": "aiplatform_v1beta1_generated_notebook_service_start_notebook_runtime_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.delete_model", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient.start_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.DeleteModel", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.StartNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "DeleteModel" + "shortName": "StartNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.StartNotebookRuntimeRequest" }, { "name": 
"name", @@ -28711,13 +33645,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_model" + "shortName": "start_notebook_runtime" }, - "description": "Sample for DeleteModel", - "file": "aiplatform_v1beta1_generated_model_service_delete_model_sync.py", + "description": "Sample for StartNotebookRuntime", + "file": "aiplatform_v1beta1_generated_notebook_service_start_notebook_runtime_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_DeleteModel_sync", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_StartNotebookRuntime_sync", "segments": [ { "end": 55, @@ -28750,38 +33684,34 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_delete_model_sync.py" + "title": "aiplatform_v1beta1_generated_notebook_service_start_notebook_runtime_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient", + "shortName": "NotebookServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.export_model", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceAsyncClient.upgrade_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ExportModel", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.UpgradeNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + "shortName": "NotebookService" }, - "shortName": "ExportModel" + "shortName": "UpgradeNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.UpgradeNotebookRuntimeRequest" }, { "name": "name", "type": "str" }, - { - "name": "output_config", - "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -28796,13 +33726,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "export_model" + "shortName": "upgrade_notebook_runtime" }, - "description": "Sample for ExportModel", - "file": "aiplatform_v1beta1_generated_model_service_export_model_async.py", + "description": "Sample for UpgradeNotebookRuntime", + "file": "aiplatform_v1beta1_generated_notebook_service_upgrade_notebook_runtime_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ExportModel_async", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_UpgradeNotebookRuntime_async", "segments": [ { "end": 55, @@ -28835,37 +33765,33 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_export_model_async.py" + "title": "aiplatform_v1beta1_generated_notebook_service_upgrade_notebook_runtime_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient", + "shortName": "NotebookServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.export_model", + "fullName": "google.cloud.aiplatform_v1beta1.NotebookServiceClient.upgrade_notebook_runtime", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ExportModel", + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService.UpgradeNotebookRuntime", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.NotebookService", + 
"shortName": "NotebookService" }, - "shortName": "ExportModel" + "shortName": "UpgradeNotebookRuntime" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpgradeNotebookRuntimeRequest" }, { "name": "name", "type": "str" }, - { - "name": "output_config", - "type": "google.cloud.aiplatform_v1beta1.types.ExportModelRequest.OutputConfig" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -28880,13 +33806,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "export_model" + "shortName": "upgrade_notebook_runtime" }, - "description": "Sample for ExportModel", - "file": "aiplatform_v1beta1_generated_model_service_export_model_sync.py", + "description": "Sample for UpgradeNotebookRuntime", + "file": "aiplatform_v1beta1_generated_notebook_service_upgrade_notebook_runtime_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ExportModel_sync", + "regionTag": "aiplatform_v1beta1_generated_NotebookService_UpgradeNotebookRuntime_sync", "segments": [ { "end": 55, @@ -28919,32 +33845,40 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_export_model_sync.py" + "title": "aiplatform_v1beta1_generated_notebook_service_upgrade_notebook_runtime_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient", + "shortName": "PersistentResourceServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.get_model_evaluation_slice", + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient.create_persistent_resource", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice", + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.CreatePersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "GetModelEvaluationSlice" + "shortName": "CreatePersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreatePersistentResourceRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "persistent_resource", + "type": "google.cloud.aiplatform_v1beta1.types.PersistentResource" + }, + { + "name": "persistent_resource_id", "type": "str" }, { @@ -28960,22 +33894,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice", - "shortName": "get_model_evaluation_slice" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_persistent_resource" }, - "description": "Sample for GetModelEvaluationSlice", - "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py", + "description": "Sample for CreatePersistentResource", + "file": "aiplatform_v1beta1_generated_persistent_resource_service_create_persistent_resource_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_async", + "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_CreatePersistentResource_async", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -28985,46 +33919,54 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, 
"start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_async.py" + "title": "aiplatform_v1beta1_generated_persistent_resource_service_create_persistent_resource_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient", + "shortName": "PersistentResourceServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.get_model_evaluation_slice", + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient.create_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice", + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.CreatePersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "GetModelEvaluationSlice" + "shortName": "CreatePersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationSliceRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreatePersistentResourceRequest" }, { - "name": "name", + "name": "parent", + "type": "str" + }, + { + "name": "persistent_resource", + "type": "google.cloud.aiplatform_v1beta1.types.PersistentResource" + }, + { + "name": "persistent_resource_id", "type": "str" }, { @@ -29040,22 +33982,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1beta1.types.ModelEvaluationSlice", - "shortName": "get_model_evaluation_slice" + "resultType": "google.api_core.operation.Operation", + "shortName": "create_persistent_resource" }, - "description": "Sample for GetModelEvaluationSlice", - "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_sync.py", + "description": "Sample for CreatePersistentResource", + "file": "aiplatform_v1beta1_generated_persistent_resource_service_create_persistent_resource_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluationSlice_sync", + "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_CreatePersistentResource_sync", "segments": [ { - "end": 51, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 56, "start": 27, "type": "SHORT" }, @@ -29065,44 +34007,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_slice_sync.py" + "title": "aiplatform_v1beta1_generated_persistent_resource_service_create_persistent_resource_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient", + "shortName": "PersistentResourceServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.get_model_evaluation", + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient.delete_persistent_resource", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation", + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.DeletePersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "GetModelEvaluation" + "shortName": "DeletePersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeletePersistentResourceRequest" }, { "name": "name", @@ -29121,22 +34063,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", - "shortName": "get_model_evaluation" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_persistent_resource" }, - "description": "Sample for GetModelEvaluation", - "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_async.py", + "description": "Sample for DeletePersistentResource", + "file": "aiplatform_v1beta1_generated_persistent_resource_service_delete_persistent_resource_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModelEvaluation_async", + "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_DeletePersistentResource_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -29151,38 +34093,38 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_async.py" + "title": 
"aiplatform_v1beta1_generated_persistent_resource_service_delete_persistent_resource_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient", + "shortName": "PersistentResourceServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.get_model_evaluation", + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient.delete_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation", + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.DeletePersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "GetModelEvaluation" + "shortName": "DeletePersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetModelEvaluationRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeletePersistentResourceRequest" }, { "name": "name", @@ -29201,22 +34143,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", - "shortName": "get_model_evaluation" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_persistent_resource" }, - "description": "Sample for GetModelEvaluation", - "file": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_sync.py", + "description": "Sample for DeletePersistentResource", + "file": "aiplatform_v1beta1_generated_persistent_resource_service_delete_persistent_resource_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_ModelService_GetModelEvaluation_sync", + "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_DeletePersistentResource_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -29231,39 +34173,39 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_get_model_evaluation_sync.py" + "title": "aiplatform_v1beta1_generated_persistent_resource_service_delete_persistent_resource_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient", + "shortName": "PersistentResourceServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.get_model", + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient.get_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModel", + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.GetPersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "GetModel" + "shortName": "GetPersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetPersistentResourceRequest" }, { "name": "name", @@ -29282,14 +34224,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1beta1.types.Model", - "shortName": "get_model" + "resultType": "google.cloud.aiplatform_v1beta1.types.PersistentResource", + "shortName": "get_persistent_resource" }, - "description": "Sample for GetModel", - "file": "aiplatform_v1beta1_generated_model_service_get_model_async.py", + "description": "Sample for GetPersistentResource", + "file": "aiplatform_v1beta1_generated_persistent_resource_service_get_persistent_resource_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModel_async", + "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_GetPersistentResource_async", "segments": [ { "end": 51, @@ -29322,28 +34264,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_get_model_async.py" + "title": "aiplatform_v1beta1_generated_persistent_resource_service_get_persistent_resource_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient", + "shortName": "PersistentResourceServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.get_model", + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient.get_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.GetModel", + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.GetPersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "GetModel" + "shortName": "GetPersistentResource" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.GetModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetPersistentResourceRequest" }, { "name": "name", @@ -29362,14 +34304,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Model", - "shortName": "get_model" + "resultType": "google.cloud.aiplatform_v1beta1.types.PersistentResource", + "shortName": "get_persistent_resource" }, - "description": "Sample for GetModel", - "file": "aiplatform_v1beta1_generated_model_service_get_model_sync.py", + "description": "Sample for GetPersistentResource", + "file": "aiplatform_v1beta1_generated_persistent_resource_service_get_persistent_resource_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_GetModel_sync", + "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_GetPersistentResource_sync", "segments": [ { "end": 51, @@ -29402,38 +34344,34 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_get_model_sync.py" + "title": "aiplatform_v1beta1_generated_persistent_resource_service_get_persistent_resource_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient", + "shortName": "PersistentResourceServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.import_model_evaluation", + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient.list_persistent_resources", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation", + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.ListPersistentResources", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - 
"shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "ImportModelEvaluation" + "shortName": "ListPersistentResources" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ImportModelEvaluationRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListPersistentResourcesRequest" }, { "name": "parent", "type": "str" }, - { - "name": "model_evaluation", - "type": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -29447,22 +34385,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", - "shortName": "import_model_evaluation" + "resultType": "google.cloud.aiplatform_v1beta1.services.persistent_resource_service.pagers.ListPersistentResourcesAsyncPager", + "shortName": "list_persistent_resources" }, - "description": "Sample for ImportModelEvaluation", - "file": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py", + "description": "Sample for ListPersistentResources", + "file": "aiplatform_v1beta1_generated_persistent_resource_service_list_persistent_resources_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_async", + "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_ListPersistentResources_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -29482,42 +34420,38 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_async.py" + "title": "aiplatform_v1beta1_generated_persistent_resource_service_list_persistent_resources_async.py" }, { 
"canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient", + "shortName": "PersistentResourceServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.import_model_evaluation", + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient.list_persistent_resources", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ImportModelEvaluation", + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.ListPersistentResources", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "ImportModelEvaluation" + "shortName": "ListPersistentResources" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ImportModelEvaluationRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListPersistentResourcesRequest" }, { "name": "parent", "type": "str" }, - { - "name": "model_evaluation", - "type": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -29531,22 +34465,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ModelEvaluation", - "shortName": "import_model_evaluation" + "resultType": "google.cloud.aiplatform_v1beta1.services.persistent_resource_service.pagers.ListPersistentResourcesPager", + "shortName": "list_persistent_resources" }, - "description": "Sample for ImportModelEvaluation", - "file": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py", + "description": "Sample for ListPersistentResources", + "file": 
"aiplatform_v1beta1_generated_persistent_resource_service_list_persistent_resources_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ImportModelEvaluation_sync", + "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_ListPersistentResources_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -29566,37 +34500,37 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_import_model_evaluation_sync.py" + "title": "aiplatform_v1beta1_generated_persistent_resource_service_list_persistent_resources_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient", + "shortName": "PersistentResourceServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_model_evaluation_slices", + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient.reboot_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices", + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.RebootPersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "ListModelEvaluationSlices" + "shortName": "RebootPersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.RebootPersistentResourceRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -29612,22 +34546,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesAsyncPager", - "shortName": "list_model_evaluation_slices" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "reboot_persistent_resource" }, - "description": "Sample for ListModelEvaluationSlices", - "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py", + "description": "Sample for RebootPersistentResource", + "file": "aiplatform_v1beta1_generated_persistent_resource_service_reboot_persistent_resource_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_async", + "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_RebootPersistentResource_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -29642,41 +34576,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_async.py" + "title": "aiplatform_v1beta1_generated_persistent_resource_service_reboot_persistent_resource_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient", + "shortName": "PersistentResourceServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_model_evaluation_slices", + "fullName": 
"google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient.reboot_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices", + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.RebootPersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "ListModelEvaluationSlices" + "shortName": "RebootPersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationSlicesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.RebootPersistentResourceRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -29692,22 +34626,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationSlicesPager", - "shortName": "list_model_evaluation_slices" + "resultType": "google.api_core.operation.Operation", + "shortName": "reboot_persistent_resource" }, - "description": "Sample for ListModelEvaluationSlices", - "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py", + "description": "Sample for RebootPersistentResource", + "file": "aiplatform_v1beta1_generated_persistent_resource_service_reboot_persistent_resource_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluationSlices_sync", + "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_RebootPersistentResource_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -29722,43 +34656,47 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": 
"REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluation_slices_sync.py" + "title": "aiplatform_v1beta1_generated_persistent_resource_service_reboot_persistent_resource_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient", + "shortName": "PersistentResourceServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_model_evaluations", + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient.update_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations", + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.UpdatePersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "ListModelEvaluations" + "shortName": "UpdatePersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdatePersistentResourceRequest" }, { - "name": "parent", - "type": "str" + "name": "persistent_resource", + "type": "google.cloud.aiplatform_v1beta1.types.PersistentResource" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -29773,22 +34711,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsAsyncPager", - "shortName": 
"list_model_evaluations" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_persistent_resource" }, - "description": "Sample for ListModelEvaluations", - "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py", + "description": "Sample for UpdatePersistentResource", + "file": "aiplatform_v1beta1_generated_persistent_resource_service_update_persistent_resource_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_async", + "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_UpdatePersistentResource_async", "segments": [ { - "end": 52, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 54, "start": 27, "type": "SHORT" }, @@ -29798,47 +34736,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 51, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_async.py" + "title": "aiplatform_v1beta1_generated_persistent_resource_service_update_persistent_resource_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient", + "shortName": "PersistentResourceServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_model_evaluations", + "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient.update_persistent_resource", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations", + "fullName": 
"google.cloud.aiplatform.v1beta1.PersistentResourceService.UpdatePersistentResource", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", + "shortName": "PersistentResourceService" }, - "shortName": "ListModelEvaluations" + "shortName": "UpdatePersistentResource" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListModelEvaluationsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdatePersistentResourceRequest" }, { - "name": "parent", - "type": "str" + "name": "persistent_resource", + "type": "google.cloud.aiplatform_v1beta1.types.PersistentResource" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -29853,22 +34795,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelEvaluationsPager", - "shortName": "list_model_evaluations" + "resultType": "google.api_core.operation.Operation", + "shortName": "update_persistent_resource" }, - "description": "Sample for ListModelEvaluations", - "file": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py", + "description": "Sample for UpdatePersistentResource", + "file": "aiplatform_v1beta1_generated_persistent_resource_service_update_persistent_resource_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelEvaluations_sync", + "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_UpdatePersistentResource_sync", "segments": [ { - "end": 52, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 54, "start": 27, "type": "SHORT" }, @@ -29878,49 +34820,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, 
+ "end": 51, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_list_model_evaluations_sync.py" + "title": "aiplatform_v1beta1_generated_persistent_resource_service_update_persistent_resource_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_model_versions", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.batch_cancel_pipeline_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.BatchCancelPipelineJobs", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "ListModelVersions" + "shortName": "BatchCancelPipelineJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchCancelPipelineJobsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "names", + "type": "MutableSequence[str]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -29934,22 +34880,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelVersionsAsyncPager", - "shortName": "list_model_versions" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": 
"batch_cancel_pipeline_jobs" }, - "description": "Sample for ListModelVersions", - "file": "aiplatform_v1beta1_generated_model_service_list_model_versions_async.py", + "description": "Sample for BatchCancelPipelineJobs", + "file": "aiplatform_v1beta1_generated_pipeline_service_batch_cancel_pipeline_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelVersions_async", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_BatchCancelPipelineJobs_async", "segments": [ { - "end": 52, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 56, "start": 27, "type": "SHORT" }, @@ -29959,48 +34905,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_list_model_versions_async.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_batch_cancel_pipeline_jobs_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_model_versions", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.batch_cancel_pipeline_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModelVersions", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.BatchCancelPipelineJobs", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": 
"PipelineService" }, - "shortName": "ListModelVersions" + "shortName": "BatchCancelPipelineJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListModelVersionsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchCancelPipelineJobsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "names", + "type": "MutableSequence[str]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -30014,22 +34964,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelVersionsPager", - "shortName": "list_model_versions" + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_cancel_pipeline_jobs" }, - "description": "Sample for ListModelVersions", - "file": "aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py", + "description": "Sample for BatchCancelPipelineJobs", + "file": "aiplatform_v1beta1_generated_pipeline_service_batch_cancel_pipeline_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModelVersions_sync", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_BatchCancelPipelineJobs_sync", "segments": [ { - "end": 52, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 56, "start": 27, "type": "SHORT" }, @@ -30039,49 +34989,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_list_model_versions_sync.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_batch_cancel_pipeline_jobs_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.list_models", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.batch_delete_pipeline_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModels", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.BatchDeletePipelineJobs", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "ListModels" + "shortName": "BatchDeletePipelineJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListModelsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchDeletePipelineJobsRequest" }, { "name": "parent", "type": "str" }, + { + "name": "names", + "type": "MutableSequence[str]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -30095,22 +35049,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsAsyncPager", - "shortName": "list_models" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "batch_delete_pipeline_jobs" }, - "description": "Sample for ListModels", - "file": "aiplatform_v1beta1_generated_model_service_list_models_async.py", + "description": "Sample for BatchDeletePipelineJobs", + "file": "aiplatform_v1beta1_generated_pipeline_service_batch_delete_pipeline_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModels_async", + "regionTag": 
"aiplatform_v1beta1_generated_PipelineService_BatchDeletePipelineJobs_async", "segments": [ { - "end": 52, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 56, "start": 27, "type": "SHORT" }, @@ -30120,48 +35074,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_list_models_async.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_batch_delete_pipeline_jobs_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.list_models", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.batch_delete_pipeline_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.ListModels", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.BatchDeletePipelineJobs", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "ListModels" + "shortName": "BatchDeletePipelineJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListModelsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchDeletePipelineJobsRequest" }, { "name": "parent", "type": "str" }, + { + "name": "names", + "type": "MutableSequence[str]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -30175,22 +35133,22 @@ "type": 
"Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.model_service.pagers.ListModelsPager", - "shortName": "list_models" + "resultType": "google.api_core.operation.Operation", + "shortName": "batch_delete_pipeline_jobs" }, - "description": "Sample for ListModels", - "file": "aiplatform_v1beta1_generated_model_service_list_models_sync.py", + "description": "Sample for BatchDeletePipelineJobs", + "file": "aiplatform_v1beta1_generated_pipeline_service_batch_delete_pipeline_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_ListModels_sync", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_BatchDeletePipelineJobs_sync", "segments": [ { - "end": 52, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 56, "start": 27, "type": "SHORT" }, @@ -30200,53 +35158,49 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_list_models_sync.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_batch_delete_pipeline_jobs_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.merge_version_aliases", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.cancel_pipeline_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases", + "fullName": 
"google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "MergeVersionAliases" + "shortName": "CancelPipelineJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.MergeVersionAliasesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest" }, { "name": "name", "type": "str" }, - { - "name": "version_aliases", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -30260,22 +35214,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Model", - "shortName": "merge_version_aliases" + "shortName": "cancel_pipeline_job" }, - "description": "Sample for MergeVersionAliases", - "file": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py", + "description": "Sample for CancelPipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_async", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelPipelineJob_async", "segments": [ { - "end": 52, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 49, "start": 27, "type": "SHORT" }, @@ -30285,52 +35238,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 50, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_async.py" + "title": 
"aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.merge_version_aliases", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.cancel_pipeline_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.MergeVersionAliases", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "MergeVersionAliases" + "shortName": "CancelPipelineJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.MergeVersionAliasesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest" }, { "name": "name", "type": "str" }, - { - "name": "version_aliases", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -30344,22 +35291,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Model", - "shortName": "merge_version_aliases" + "shortName": "cancel_pipeline_job" }, - "description": "Sample for MergeVersionAliases", - "file": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py", + "description": "Sample for CancelPipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_MergeVersionAliases_sync", + "regionTag": 
"aiplatform_v1beta1_generated_PipelineService_CancelPipelineJob_sync", "segments": [ { - "end": 52, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 49, "start": 27, "type": "SHORT" }, @@ -30369,47 +35315,45 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 50, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_merge_version_aliases_sync.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.update_explanation_dataset", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.cancel_training_pipeline", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "UpdateExplanationDataset" + "shortName": "CancelTrainingPipeline" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateExplanationDatasetRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest" }, { - "name": "model", + "name": "name", "type": "str" }, { @@ -30425,22 +35369,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.api_core.operation_async.AsyncOperation", - "shortName": "update_explanation_dataset" + "shortName": "cancel_training_pipeline" }, - "description": "Sample for UpdateExplanationDataset", - "file": "aiplatform_v1beta1_generated_model_service_update_explanation_dataset_async.py", + "description": "Sample for CancelTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateExplanationDataset_async", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelTrainingPipeline_async", "segments": [ { - "end": 55, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 49, "start": 27, "type": "SHORT" }, @@ -30455,41 +35398,39 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_update_explanation_dataset_async.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.update_explanation_dataset", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.cancel_training_pipeline", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateExplanationDataset", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": 
"google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "UpdateExplanationDataset" + "shortName": "CancelTrainingPipeline" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateExplanationDatasetRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest" }, { - "name": "model", + "name": "name", "type": "str" }, { @@ -30505,22 +35446,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_explanation_dataset" + "shortName": "cancel_training_pipeline" }, - "description": "Sample for UpdateExplanationDataset", - "file": "aiplatform_v1beta1_generated_model_service_update_explanation_dataset_sync.py", + "description": "Sample for CancelTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateExplanationDataset_sync", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelTrainingPipeline_sync", "segments": [ { - "end": 55, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 49, "start": 27, "type": "SHORT" }, @@ -30535,47 +35475,49 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_update_explanation_dataset_sync.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.update_model", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.create_pipeline_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateModel", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "UpdateModel" + "shortName": "CreatePipelineJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest" }, { - "name": "model", - "type": "google.cloud.aiplatform_v1beta1.types.Model" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "pipeline_job", + "type": "google.cloud.aiplatform_v1beta1.types.PipelineJob" + }, + { + "name": "pipeline_job_id", + "type": "str" }, { "name": "retry", @@ -30590,22 +35532,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Model", - "shortName": "update_model" + "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", + "shortName": "create_pipeline_job" }, - "description": "Sample for UpdateModel", - "file": "aiplatform_v1beta1_generated_model_service_update_model_async.py", + "description": "Sample for CreatePipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateModel_async", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreatePipelineJob_async", "segments": [ { - "end": 54, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 51, 
"start": 27, "type": "SHORT" }, @@ -30615,51 +35557,55 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 49, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_update_model_async.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.update_model", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.create_pipeline_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UpdateModel", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "UpdateModel" + "shortName": "CreatePipelineJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest" }, { - "name": "model", - "type": "google.cloud.aiplatform_v1beta1.types.Model" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "pipeline_job", + "type": "google.cloud.aiplatform_v1beta1.types.PipelineJob" + }, + { + "name": "pipeline_job_id", + "type": "str" }, { "name": "retry", @@ -30674,22 +35620,22 @@ "type": "Sequence[Tuple[str, 
str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Model", - "shortName": "update_model" + "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", + "shortName": "create_pipeline_job" }, - "description": "Sample for UpdateModel", - "file": "aiplatform_v1beta1_generated_model_service_update_model_sync.py", + "description": "Sample for CreatePipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_UpdateModel_sync", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreatePipelineJob_sync", "segments": [ { - "end": 54, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 51, "start": 27, "type": "SHORT" }, @@ -30699,52 +35645,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 49, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_update_model_sync.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient", - "shortName": "ModelServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceAsyncClient.upload_model", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.create_training_pipeline", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UploadModel", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline", "service": { - "fullName": 
"google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "UploadModel" + "shortName": "CreateTrainingPipeline" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UploadModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest" }, { "name": "parent", "type": "str" }, { - "name": "model", - "type": "google.cloud.aiplatform_v1beta1.types.Model" + "name": "training_pipeline", + "type": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline" }, { "name": "retry", @@ -30759,22 +35705,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "upload_model" + "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", + "shortName": "create_training_pipeline" }, - "description": "Sample for UploadModel", - "file": "aiplatform_v1beta1_generated_model_service_upload_model_async.py", + "description": "Sample for CreateTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ModelService_UploadModel_async", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreateTrainingPipeline_async", "segments": [ { - "end": 59, + "end": 57, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 57, "start": 27, "type": "SHORT" }, @@ -30784,51 +35730,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 54, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 58, + "start": 55, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_upload_model_async.py" + "title": 
"aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient", - "shortName": "ModelServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ModelServiceClient.upload_model", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.create_training_pipeline", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService.UploadModel", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ModelService", - "shortName": "ModelService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "UploadModel" + "shortName": "CreateTrainingPipeline" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UploadModelRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest" }, { "name": "parent", "type": "str" }, { - "name": "model", - "type": "google.cloud.aiplatform_v1beta1.types.Model" + "name": "training_pipeline", + "type": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline" }, { "name": "retry", @@ -30843,22 +35789,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "upload_model" + "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", + "shortName": "create_training_pipeline" }, - "description": "Sample for UploadModel", - "file": "aiplatform_v1beta1_generated_model_service_upload_model_sync.py", + "description": "Sample for CreateTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - 
"regionTag": "aiplatform_v1beta1_generated_ModelService_UploadModel_sync", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreateTrainingPipeline_sync", "segments": [ { - "end": 59, + "end": 57, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 57, "start": 27, "type": "SHORT" }, @@ -30868,55 +35814,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 54, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 58, + "start": 55, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_model_service_upload_model_sync.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient", - "shortName": "PersistentResourceServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient.create_persistent_resource", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.delete_pipeline_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.CreatePersistentResource", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", - "shortName": "PersistentResourceService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "CreatePersistentResource" + "shortName": "DeletePipelineJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreatePersistentResourceRequest" - }, - { - "name": "parent", - "type": "str" - 
}, - { - "name": "persistent_resource", - "type": "google.cloud.aiplatform_v1beta1.types.PersistentResource" + "type": "google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest" }, { - "name": "persistent_resource_id", + "name": "name", "type": "str" }, { @@ -30933,21 +35871,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_persistent_resource" + "shortName": "delete_pipeline_job" }, - "description": "Sample for CreatePersistentResource", - "file": "aiplatform_v1beta1_generated_persistent_resource_service_create_persistent_resource_async.py", + "description": "Sample for DeletePipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_CreatePersistentResource_async", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeletePipelineJob_async", "segments": [ { - "end": 56, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 55, "start": 27, "type": "SHORT" }, @@ -30957,54 +35895,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_persistent_resource_service_create_persistent_resource_async.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient", - "shortName": "PersistentResourceServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient.create_persistent_resource", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.delete_pipeline_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.CreatePersistentResource", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", - "shortName": "PersistentResourceService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "CreatePersistentResource" + "shortName": "DeletePipelineJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreatePersistentResourceRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "persistent_resource", - "type": "google.cloud.aiplatform_v1beta1.types.PersistentResource" + "type": "google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest" }, { - "name": "persistent_resource_id", + "name": "name", "type": "str" }, { @@ -31021,21 +35951,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "create_persistent_resource" + "shortName": "delete_pipeline_job" }, - "description": "Sample for CreatePersistentResource", - "file": "aiplatform_v1beta1_generated_persistent_resource_service_create_persistent_resource_sync.py", + "description": "Sample for DeletePipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_CreatePersistentResource_sync", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeletePipelineJob_sync", "segments": [ { - "end": 56, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 55, "start": 27, "type": "SHORT" }, @@ -31045,44 +35975,44 @@ "type": 
"CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_persistent_resource_service_create_persistent_resource_sync.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient", - "shortName": "PersistentResourceServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient.delete_persistent_resource", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.delete_training_pipeline", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.DeletePersistentResource", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", - "shortName": "PersistentResourceService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "DeletePersistentResource" + "shortName": "DeleteTrainingPipeline" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeletePersistentResourceRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest" }, { "name": "name", @@ -31102,13 +36032,13 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_persistent_resource" + "shortName": "delete_training_pipeline" }, - "description": "Sample for DeletePersistentResource", 
- "file": "aiplatform_v1beta1_generated_persistent_resource_service_delete_persistent_resource_async.py", + "description": "Sample for DeleteTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_DeletePersistentResource_async", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeleteTrainingPipeline_async", "segments": [ { "end": 55, @@ -31141,28 +36071,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_persistent_resource_service_delete_persistent_resource_async.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient", - "shortName": "PersistentResourceServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient.delete_persistent_resource", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.delete_training_pipeline", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.DeletePersistentResource", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", - "shortName": "PersistentResourceService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "DeletePersistentResource" + "shortName": "DeleteTrainingPipeline" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeletePersistentResourceRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest" }, { "name": "name", @@ -31182,13 +36112,13 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_persistent_resource" + "shortName": "delete_training_pipeline" }, - "description": "Sample for DeletePersistentResource", - "file": "aiplatform_v1beta1_generated_persistent_resource_service_delete_persistent_resource_sync.py", + "description": "Sample for DeleteTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_DeletePersistentResource_sync", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeleteTrainingPipeline_sync", "segments": [ { "end": 55, @@ -31221,29 +36151,29 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_persistent_resource_service_delete_persistent_resource_sync.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient", - "shortName": "PersistentResourceServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient.get_persistent_resource", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.get_pipeline_job", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.GetPersistentResource", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", - "shortName": "PersistentResourceService" + "fullName": 
"google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "GetPersistentResource" + "shortName": "GetPipelineJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetPersistentResourceRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest" }, { "name": "name", @@ -31262,14 +36192,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.PersistentResource", - "shortName": "get_persistent_resource" + "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", + "shortName": "get_pipeline_job" }, - "description": "Sample for GetPersistentResource", - "file": "aiplatform_v1beta1_generated_persistent_resource_service_get_persistent_resource_async.py", + "description": "Sample for GetPipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_GetPersistentResource_async", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetPipelineJob_async", "segments": [ { "end": 51, @@ -31302,28 +36232,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_persistent_resource_service_get_persistent_resource_async.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient", - "shortName": "PersistentResourceServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient.get_persistent_resource", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.get_pipeline_job", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.PersistentResourceService.GetPersistentResource", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", - "shortName": "PersistentResourceService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "GetPersistentResource" + "shortName": "GetPipelineJob" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetPersistentResourceRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest" }, { "name": "name", @@ -31342,14 +36272,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.PersistentResource", - "shortName": "get_persistent_resource" + "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", + "shortName": "get_pipeline_job" }, - "description": "Sample for GetPersistentResource", - "file": "aiplatform_v1beta1_generated_persistent_resource_service_get_persistent_resource_sync.py", + "description": "Sample for GetPipelineJob", + "file": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_GetPersistentResource_sync", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetPipelineJob_sync", "segments": [ { "end": 51, @@ -31382,32 +36312,32 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_persistent_resource_service_get_persistent_resource_sync.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient", - "shortName": "PersistentResourceServiceAsyncClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient.list_persistent_resources", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.get_training_pipeline", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.ListPersistentResources", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", - "shortName": "PersistentResourceService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "ListPersistentResources" + "shortName": "GetTrainingPipeline" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListPersistentResourcesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -31423,22 +36353,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.persistent_resource_service.pagers.ListPersistentResourcesAsyncPager", - "shortName": "list_persistent_resources" + "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", + "shortName": "get_training_pipeline" }, - "description": "Sample for ListPersistentResources", - "file": "aiplatform_v1beta1_generated_persistent_resource_service_list_persistent_resources_async.py", + "description": "Sample for GetTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_ListPersistentResources_async", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetTrainingPipeline_async", "segments": [ { - 
"end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -31458,36 +36388,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_persistent_resource_service_list_persistent_resources_async.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient", - "shortName": "PersistentResourceServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient.list_persistent_resources", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.get_training_pipeline", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.ListPersistentResources", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", - "shortName": "PersistentResourceService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "ListPersistentResources" + "shortName": "GetTrainingPipeline" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListPersistentResourcesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -31503,22 +36433,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.persistent_resource_service.pagers.ListPersistentResourcesPager", - "shortName": "list_persistent_resources" + "resultType": 
"google.cloud.aiplatform_v1beta1.types.TrainingPipeline", + "shortName": "get_training_pipeline" }, - "description": "Sample for ListPersistentResources", - "file": "aiplatform_v1beta1_generated_persistent_resource_service_list_persistent_resources_sync.py", + "description": "Sample for GetTrainingPipeline", + "file": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_ListPersistentResources_sync", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetTrainingPipeline_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -31538,42 +36468,38 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_persistent_resource_service_list_persistent_resources_sync.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient", - "shortName": "PersistentResourceServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", + "shortName": "PipelineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceAsyncClient.update_persistent_resource", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.list_pipeline_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.UpdatePersistentResource", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", - "shortName": "PersistentResourceService" + "fullName": 
"google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "UpdatePersistentResource" + "shortName": "ListPipelineJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdatePersistentResourceRequest" - }, - { - "name": "persistent_resource", - "type": "google.cloud.aiplatform_v1beta1.types.PersistentResource" + "type": "google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "parent", + "type": "str" }, { "name": "retry", @@ -31588,22 +36514,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_persistent_resource" + "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager", + "shortName": "list_pipeline_jobs" }, - "description": "Sample for UpdatePersistentResource", - "file": "aiplatform_v1beta1_generated_persistent_resource_service_update_persistent_resource_async.py", + "description": "Sample for ListPipelineJobs", + "file": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_UpdatePersistentResource_async", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListPipelineJobs_async", "segments": [ { - "end": 54, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 52, "start": 27, "type": "SHORT" }, @@ -31613,51 +36539,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 45, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_persistent_resource_service_update_persistent_resource_async.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient", - "shortName": "PersistentResourceServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", + "shortName": "PipelineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PersistentResourceServiceClient.update_persistent_resource", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.list_pipeline_jobs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService.UpdatePersistentResource", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PersistentResourceService", - "shortName": "PersistentResourceService" + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", + "shortName": "PipelineService" }, - "shortName": "UpdatePersistentResource" + "shortName": "ListPipelineJobs" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdatePersistentResourceRequest" - }, - { - "name": "persistent_resource", - "type": "google.cloud.aiplatform_v1beta1.types.PersistentResource" + "type": "google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "parent", + "type": "str" }, { "name": "retry", @@ -31672,22 +36594,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_persistent_resource" + "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsPager", + "shortName": "list_pipeline_jobs" }, - "description": "Sample for UpdatePersistentResource", - 
"file": "aiplatform_v1beta1_generated_persistent_resource_service_update_persistent_resource_sync.py", + "description": "Sample for ListPipelineJobs", + "file": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PersistentResourceService_UpdatePersistentResource_sync", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListPipelineJobs_sync", "segments": [ { - "end": 54, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 52, "start": 27, "type": "SHORT" }, @@ -31697,22 +36619,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 45, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_persistent_resource_service_update_persistent_resource_sync.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_sync.py" }, { "canonical": true, @@ -31722,28 +36644,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", "shortName": "PipelineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.batch_cancel_pipeline_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.list_training_pipelines", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.BatchCancelPipelineJobs", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines", "service": { "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, - "shortName": "BatchCancelPipelineJobs" + "shortName": "ListTrainingPipelines" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchCancelPipelineJobsRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest" }, { "name": "parent", "type": "str" }, - { - "name": "names", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -31757,22 +36675,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "batch_cancel_pipeline_jobs" + "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager", + "shortName": "list_training_pipelines" }, - "description": "Sample for BatchCancelPipelineJobs", - "file": "aiplatform_v1beta1_generated_pipeline_service_batch_cancel_pipeline_jobs_async.py", + "description": "Sample for ListTrainingPipelines", + "file": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_BatchCancelPipelineJobs_async", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListTrainingPipelines_async", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -31782,22 +36700,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_batch_cancel_pipeline_jobs_async.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_async.py" }, { "canonical": true, @@ -31806,28 +36724,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", "shortName": "PipelineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.batch_cancel_pipeline_jobs", + 
"fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.list_training_pipelines", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.BatchCancelPipelineJobs", + "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines", "service": { "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", "shortName": "PipelineService" }, - "shortName": "BatchCancelPipelineJobs" + "shortName": "ListTrainingPipelines" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchCancelPipelineJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest" }, { "name": "parent", "type": "str" }, - { - "name": "names", - "type": "MutableSequence[str]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -31841,22 +36755,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "batch_cancel_pipeline_jobs" + "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesPager", + "shortName": "list_training_pipelines" }, - "description": "Sample for BatchCancelPipelineJobs", - "file": "aiplatform_v1beta1_generated_pipeline_service_batch_cancel_pipeline_jobs_sync.py", + "description": "Sample for ListTrainingPipelines", + "file": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_BatchCancelPipelineJobs_sync", + "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListTrainingPipelines_sync", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -31866,52 +36780,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 
46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_batch_cancel_pipeline_jobs_sync.py" + "title": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", - "shortName": "PipelineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.batch_delete_pipeline_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.chat_completions", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.BatchDeletePipelineJobs", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.ChatCompletions", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "BatchDeletePipelineJobs" + "shortName": "ChatCompletions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchDeletePipelineJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ChatCompletionsRequest" }, { - "name": "parent", + "name": "endpoint", "type": "str" }, { - "name": "names", - "type": "MutableSequence[str]" + "name": "http_body", + "type": "google.api.httpbody_pb2.HttpBody" }, { "name": "retry", @@ -31926,22 +36840,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "batch_delete_pipeline_jobs" + "resultType": "Iterable[google.api.httpbody_pb2.HttpBody]", + "shortName": "chat_completions" }, - "description": 
"Sample for BatchDeletePipelineJobs", - "file": "aiplatform_v1beta1_generated_pipeline_service_batch_delete_pipeline_jobs_async.py", + "description": "Sample for ChatCompletions", + "file": "aiplatform_v1beta1_generated_prediction_service_chat_completions_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_BatchDeletePipelineJobs_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_ChatCompletions_async", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -31951,51 +36865,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_batch_delete_pipeline_jobs_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_chat_completions_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", - "shortName": "PipelineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.batch_delete_pipeline_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.chat_completions", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.BatchDeletePipelineJobs", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.ChatCompletions", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, 
- "shortName": "BatchDeletePipelineJobs" + "shortName": "ChatCompletions" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchDeletePipelineJobsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ChatCompletionsRequest" }, { - "name": "parent", + "name": "endpoint", "type": "str" }, { - "name": "names", - "type": "MutableSequence[str]" + "name": "http_body", + "type": "google.api.httpbody_pb2.HttpBody" }, { "name": "retry", @@ -32010,22 +36924,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "batch_delete_pipeline_jobs" + "resultType": "Iterable[google.api.httpbody_pb2.HttpBody]", + "shortName": "chat_completions" }, - "description": "Sample for BatchDeletePipelineJobs", - "file": "aiplatform_v1beta1_generated_pipeline_service_batch_delete_pipeline_jobs_sync.py", + "description": "Sample for ChatCompletions", + "file": "aiplatform_v1beta1_generated_prediction_service_chat_completions_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_BatchDeletePipelineJobs_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_ChatCompletions_sync", "segments": [ { - "end": 56, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 52, "start": 27, "type": "SHORT" }, @@ -32035,49 +36949,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_batch_delete_pipeline_jobs_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_chat_completions_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", - "shortName": "PipelineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.cancel_pipeline_job", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.count_tokens", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.CountTokens", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "CancelPipelineJob" + "shortName": "CountTokens" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CountTokensRequest" }, { - "name": "name", + "name": "endpoint", "type": "str" }, + { + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -32091,21 +37009,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_pipeline_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.CountTokensResponse", + "shortName": "count_tokens" }, - "description": "Sample for CancelPipelineJob", - "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_async.py", + "description": "Sample for CountTokens", + "file": "aiplatform_v1beta1_generated_prediction_service_count_tokens_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelPipelineJob_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_CountTokens_async", "segments": [ { - "end": 49, + 
"end": 60, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 60, "start": 27, "type": "SHORT" }, @@ -32115,46 +37034,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "start": 46, + "end": 57, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_count_tokens_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", - "shortName": "PipelineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.cancel_pipeline_job", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.count_tokens", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelPipelineJob", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.CountTokens", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "CancelPipelineJob" + "shortName": "CountTokens" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelPipelineJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CountTokensRequest" }, { - "name": "name", + "name": "endpoint", "type": "str" }, + { + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -32168,21 +37093,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_pipeline_job" + "resultType": 
"google.cloud.aiplatform_v1beta1.types.CountTokensResponse", + "shortName": "count_tokens" }, - "description": "Sample for CancelPipelineJob", - "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_sync.py", + "description": "Sample for CountTokens", + "file": "aiplatform_v1beta1_generated_prediction_service_count_tokens_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelPipelineJob_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_CountTokens_sync", "segments": [ { - "end": 49, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 60, "start": 27, "type": "SHORT" }, @@ -32192,46 +37118,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 54, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "start": 46, + "end": 57, + "start": 55, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_pipeline_job_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_count_tokens_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", - "shortName": "PipelineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.cancel_training_pipeline", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.direct_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.DirectPredict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": 
"google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "CancelTrainingPipeline" + "shortName": "DirectPredict" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.DirectPredictRequest" }, { "name": "retry", @@ -32246,21 +37170,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_training_pipeline" + "resultType": "google.cloud.aiplatform_v1beta1.types.DirectPredictResponse", + "shortName": "direct_predict" }, - "description": "Sample for CancelTrainingPipeline", - "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_async.py", + "description": "Sample for DirectPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_direct_predict_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelTrainingPipeline_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_DirectPredict_async", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -32275,40 +37200,38 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_direct_predict_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", - "shortName": "PipelineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.PipelineServiceClient.cancel_training_pipeline", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.direct_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.DirectPredict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "CancelTrainingPipeline" + "shortName": "DirectPredict" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CancelTrainingPipelineRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.DirectPredictRequest" }, { "name": "retry", @@ -32323,21 +37246,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "cancel_training_pipeline" + "resultType": "google.cloud.aiplatform_v1beta1.types.DirectPredictResponse", + "shortName": "direct_predict" }, - "description": "Sample for CancelTrainingPipeline", - "file": "aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_sync.py", + "description": "Sample for DirectPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_direct_predict_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_CancelTrainingPipeline_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_DirectPredict_sync", "segments": [ { - "end": 49, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 51, "start": 27, "type": "SHORT" }, @@ -32352,49 +37276,39 @@ "type": "REQUEST_INITIALIZATION" }, { + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_pipeline_service_cancel_training_pipeline_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_direct_predict_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", - "shortName": "PipelineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.create_pipeline_job", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.direct_raw_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.DirectRawPredict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "CreatePipelineJob" + "shortName": "DirectRawPredict" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "pipeline_job", - "type": "google.cloud.aiplatform_v1beta1.types.PipelineJob" - }, - { - "name": "pipeline_job_id", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.DirectRawPredictRequest" }, { "name": "retry", @@ -32409,14 +37323,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", - "shortName": "create_pipeline_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.DirectRawPredictResponse", + "shortName": "direct_raw_predict" }, - "description": "Sample for CreatePipelineJob", - "file": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_async.py", + "description": "Sample 
for DirectRawPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_direct_raw_predict_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreatePipelineJob_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_DirectRawPredict_async", "segments": [ { "end": 51, @@ -32449,40 +37363,28 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_direct_raw_predict_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", - "shortName": "PipelineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.create_pipeline_job", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.direct_raw_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreatePipelineJob", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.DirectRawPredict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "CreatePipelineJob" + "shortName": "DirectRawPredict" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreatePipelineJobRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "pipeline_job", - "type": "google.cloud.aiplatform_v1beta1.types.PipelineJob" - }, - { - "name": "pipeline_job_id", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.DirectRawPredictRequest" }, { "name": "retry", @@ -32497,14 +37399,14 @@ "type": "Sequence[Tuple[str, str]" } 
], - "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", - "shortName": "create_pipeline_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.DirectRawPredictResponse", + "shortName": "direct_raw_predict" }, - "description": "Sample for CreatePipelineJob", - "file": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_sync.py", + "description": "Sample for DirectRawPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_direct_raw_predict_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreatePipelineJob_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_DirectRawPredict_sync", "segments": [ { "end": 51, @@ -32537,37 +37439,45 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_create_pipeline_job_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_direct_raw_predict_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", - "shortName": "PipelineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.create_training_pipeline", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.explain", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Explain", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "CreateTrainingPipeline" + "shortName": "Explain" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ExplainRequest" }, { - "name": "parent", + "name": "endpoint", "type": "str" }, { - "name": "training_pipeline", - "type": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline" + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "deployed_model_id", + "type": "str" }, { "name": "retry", @@ -32582,22 +37492,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", - "shortName": "create_training_pipeline" + "resultType": "google.cloud.aiplatform_v1beta1.types.ExplainResponse", + "shortName": "explain" }, - "description": "Sample for CreateTrainingPipeline", - "file": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_async.py", + "description": "Sample for Explain", + "file": "aiplatform_v1beta1_generated_prediction_service_explain_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreateTrainingPipeline_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_Explain_async", "segments": [ { - "end": 57, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 55, "start": 27, "type": "SHORT" }, @@ -32607,51 +37517,59 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 52, + "end": 52, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_explain_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.PipelineServiceClient", - "shortName": "PipelineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.create_training_pipeline", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.explain", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Explain", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "CreateTrainingPipeline" + "shortName": "Explain" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateTrainingPipelineRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ExplainRequest" }, { - "name": "parent", + "name": "endpoint", "type": "str" }, { - "name": "training_pipeline", - "type": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline" + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, + { + "name": "deployed_model_id", + "type": "str" }, { "name": "retry", @@ -32666,22 +37584,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", - "shortName": "create_training_pipeline" + "resultType": "google.cloud.aiplatform_v1beta1.types.ExplainResponse", + "shortName": "explain" }, - "description": "Sample for CreateTrainingPipeline", - "file": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_sync.py", + "description": "Sample for Explain", + "file": "aiplatform_v1beta1_generated_prediction_service_explain_sync.py", "language": 
"PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_CreateTrainingPipeline_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_Explain_sync", "segments": [ { - "end": 57, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 55, "start": 27, "type": "SHORT" }, @@ -32691,49 +37609,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 52, + "end": 52, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_create_training_pipeline_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_explain_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", - "shortName": "PipelineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.delete_pipeline_job", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.generate_content", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.GenerateContent", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "DeletePipelineJob" + "shortName": "GenerateContent" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GenerateContentRequest" }, { - "name": 
"name", + "name": "model", "type": "str" }, + { + "name": "contents", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -32747,14 +37669,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_pipeline_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.GenerateContentResponse", + "shortName": "generate_content" }, - "description": "Sample for DeletePipelineJob", - "file": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_async.py", + "description": "Sample for GenerateContent", + "file": "aiplatform_v1beta1_generated_prediction_service_generate_content_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeletePipelineJob_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_GenerateContent_async", "segments": [ { "end": 55, @@ -32772,13 +37694,13 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { "end": 52, - "start": 46, + "start": 50, "type": "REQUEST_EXECUTION" }, { @@ -32787,33 +37709,37 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_generate_content_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", - "shortName": "PipelineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.delete_pipeline_job", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.generate_content", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.PipelineService.DeletePipelineJob", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.GenerateContent", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "DeletePipelineJob" + "shortName": "GenerateContent" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeletePipelineJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GenerateContentRequest" }, { - "name": "name", + "name": "model", "type": "str" }, + { + "name": "contents", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -32827,14 +37753,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_pipeline_job" + "resultType": "google.cloud.aiplatform_v1beta1.types.GenerateContentResponse", + "shortName": "generate_content" }, - "description": "Sample for DeletePipelineJob", - "file": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_sync.py", + "description": "Sample for GenerateContent", + "file": "aiplatform_v1beta1_generated_prediction_service_generate_content_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeletePipelineJob_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_GenerateContent_sync", "segments": [ { "end": 55, @@ -32852,13 +37778,13 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { "end": 52, - "start": 46, + "start": 50, "type": "REQUEST_EXECUTION" }, { @@ -32867,34 +37793,42 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_delete_pipeline_job_sync.py" 
+ "title": "aiplatform_v1beta1_generated_prediction_service_generate_content_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", - "shortName": "PipelineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.delete_training_pipeline", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Predict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "DeleteTrainingPipeline" + "shortName": "Predict" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest" + "type": "google.cloud.aiplatform_v1beta1.types.PredictRequest" }, { - "name": "name", + "name": "endpoint", "type": "str" }, + { + "name": "instances", + "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -32908,14 +37842,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_training_pipeline" + "resultType": "google.cloud.aiplatform_v1beta1.types.PredictResponse", + "shortName": "predict" }, - "description": "Sample for DeleteTrainingPipeline", - "file": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_async.py", + "description": "Sample for Predict", + "file": 
"aiplatform_v1beta1_generated_prediction_service_predict_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeleteTrainingPipeline_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_Predict_async", "segments": [ { "end": 55, @@ -32933,13 +37867,13 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { "end": 52, - "start": 46, + "start": 50, "type": "REQUEST_EXECUTION" }, { @@ -32948,33 +37882,41 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_predict_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", - "shortName": "PipelineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.delete_training_pipeline", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Predict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "DeleteTrainingPipeline" + "shortName": "Predict" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteTrainingPipelineRequest" + "type": "google.cloud.aiplatform_v1beta1.types.PredictRequest" }, { - "name": "name", + "name": "endpoint", "type": "str" }, + { + "name": "instances", + "type": 
"MutableSequence[google.protobuf.struct_pb2.Value]" + }, + { + "name": "parameters", + "type": "google.protobuf.struct_pb2.Value" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -32988,14 +37930,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_training_pipeline" + "resultType": "google.cloud.aiplatform_v1beta1.types.PredictResponse", + "shortName": "predict" }, - "description": "Sample for DeleteTrainingPipeline", - "file": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_sync.py", + "description": "Sample for Predict", + "file": "aiplatform_v1beta1_generated_prediction_service_predict_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_DeleteTrainingPipeline_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_Predict_sync", "segments": [ { "end": 55, @@ -33013,13 +37955,13 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { "end": 52, - "start": 46, + "start": 50, "type": "REQUEST_EXECUTION" }, { @@ -33028,34 +37970,38 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_delete_training_pipeline_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_predict_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", - "shortName": "PipelineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.get_pipeline_job", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.raw_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob", + 
"fullName": "google.cloud.aiplatform.v1beta1.PredictionService.RawPredict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "GetPipelineJob" + "shortName": "RawPredict" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.RawPredictRequest" }, { - "name": "name", + "name": "endpoint", "type": "str" }, + { + "name": "http_body", + "type": "google.api.httpbody_pb2.HttpBody" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -33069,14 +38015,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", - "shortName": "get_pipeline_job" + "resultType": "google.api.httpbody_pb2.HttpBody", + "shortName": "raw_predict" }, - "description": "Sample for GetPipelineJob", - "file": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_async.py", + "description": "Sample for RawPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_raw_predict_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetPipelineJob_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_RawPredict_async", "segments": [ { "end": 51, @@ -33109,33 +38055,37 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_raw_predict_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", - "shortName": "PipelineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.PipelineServiceClient.get_pipeline_job", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.raw_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetPipelineJob", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.RawPredict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "GetPipelineJob" + "shortName": "RawPredict" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetPipelineJobRequest" + "type": "google.cloud.aiplatform_v1beta1.types.RawPredictRequest" }, { - "name": "name", + "name": "endpoint", "type": "str" }, + { + "name": "http_body", + "type": "google.api.httpbody_pb2.HttpBody" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -33149,14 +38099,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.PipelineJob", - "shortName": "get_pipeline_job" + "resultType": "google.api.httpbody_pb2.HttpBody", + "shortName": "raw_predict" }, - "description": "Sample for GetPipelineJob", - "file": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_sync.py", + "description": "Sample for RawPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_raw_predict_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetPipelineJob_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_RawPredict_sync", "segments": [ { "end": 51, @@ -33189,33 +38139,29 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_get_pipeline_job_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_raw_predict_sync.py" }, { "canonical": true, "clientMethod": { 
"async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", - "shortName": "PipelineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.get_training_pipeline", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.server_streaming_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.ServerStreamingPredict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "GetTrainingPipeline" + "shortName": "ServerStreamingPredict" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.StreamingPredictRequest" }, { "name": "retry", @@ -33230,22 +38176,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", - "shortName": "get_training_pipeline" + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingPredictResponse]", + "shortName": "server_streaming_predict" }, - "description": "Sample for GetTrainingPipeline", - "file": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_async.py", + "description": "Sample for ServerStreamingPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_server_streaming_predict_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetTrainingPipeline_async", + "regionTag": 
"aiplatform_v1beta1_generated_PredictionService_ServerStreamingPredict_async", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -33265,37 +38211,33 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_server_streaming_predict_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", - "shortName": "PipelineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.get_training_pipeline", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.server_streaming_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.ServerStreamingPredict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "GetTrainingPipeline" + "shortName": "ServerStreamingPredict" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetTrainingPipelineRequest" - }, - { - "name": "name", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.StreamingPredictRequest" }, { "name": "retry", @@ -33310,22 +38252,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TrainingPipeline", - "shortName": "get_training_pipeline" + "resultType": 
"Iterable[google.cloud.aiplatform_v1beta1.types.StreamingPredictResponse]", + "shortName": "server_streaming_predict" }, - "description": "Sample for GetTrainingPipeline", - "file": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_sync.py", + "description": "Sample for ServerStreamingPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_server_streaming_predict_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_GetTrainingPipeline_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_ServerStreamingPredict_sync", "segments": [ { - "end": 51, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 52, "start": 27, "type": "SHORT" }, @@ -33345,38 +38287,34 @@ "type": "REQUEST_EXECUTION" }, { - "end": 52, + "end": 53, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_get_training_pipeline_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_server_streaming_predict_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", - "shortName": "PipelineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.list_pipeline_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.stream_direct_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamDirectPredict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - 
"shortName": "ListPipelineJobs" + "shortName": "StreamDirectPredict" }, "parameters": [ { - "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest" - }, - { - "name": "parent", - "type": "str" + "name": "requests", + "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamDirectPredictRequest]" }, { "name": "retry", @@ -33391,22 +38329,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsAsyncPager", - "shortName": "list_pipeline_jobs" + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamDirectPredictResponse]", + "shortName": "stream_direct_predict" }, - "description": "Sample for ListPipelineJobs", - "file": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_async.py", + "description": "Sample for StreamDirectPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_stream_direct_predict_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListPipelineJobs_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamDirectPredict_async", "segments": [ { - "end": 52, + "end": 62, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 62, "start": 27, "type": "SHORT" }, @@ -33416,47 +38354,43 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 55, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 56, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 63, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_stream_direct_predict_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", - "shortName": 
"PipelineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.list_pipeline_jobs", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.stream_direct_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListPipelineJobs", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamDirectPredict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "ListPipelineJobs" + "shortName": "StreamDirectPredict" }, "parameters": [ { - "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListPipelineJobsRequest" - }, - { - "name": "parent", - "type": "str" + "name": "requests", + "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamDirectPredictRequest]" }, { "name": "retry", @@ -33471,22 +38405,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListPipelineJobsPager", - "shortName": "list_pipeline_jobs" + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamDirectPredictResponse]", + "shortName": "stream_direct_predict" }, - "description": "Sample for ListPipelineJobs", - "file": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_sync.py", + "description": "Sample for StreamDirectPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_stream_direct_predict_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListPipelineJobs_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamDirectPredict_sync", "segments": [ { - "end": 52, + "end": 62, "start": 27, "type": 
"FULL" }, { - "end": 52, + "end": 62, "start": 27, "type": "SHORT" }, @@ -33496,48 +38430,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 55, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 56, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 63, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_list_pipeline_jobs_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_stream_direct_predict_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient", - "shortName": "PipelineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", + "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceAsyncClient.list_training_pipelines", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.stream_direct_raw_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamDirectRawPredict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "ListTrainingPipelines" + "shortName": "StreamDirectRawPredict" }, "parameters": [ { - "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest" - }, - { - "name": "parent", - "type": "str" + "name": "requests", + "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamDirectRawPredictRequest]" }, { "name": "retry", @@ -33552,22 +38482,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesAsyncPager", - "shortName": "list_training_pipelines" + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamDirectRawPredictResponse]", + "shortName": "stream_direct_raw_predict" }, - "description": "Sample for ListTrainingPipelines", - "file": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_async.py", + "description": "Sample for StreamDirectRawPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_stream_direct_raw_predict_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListTrainingPipelines_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamDirectRawPredict_async", "segments": [ { - "end": 52, + "end": 62, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 62, "start": 27, "type": "SHORT" }, @@ -33577,47 +38507,43 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 55, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 56, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 63, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_stream_direct_raw_predict_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient", - "shortName": "PipelineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", + "shortName": "PredictionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PipelineServiceClient.list_training_pipelines", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.stream_direct_raw_predict", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamDirectRawPredict", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PipelineService", - "shortName": "PipelineService" + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", + "shortName": "PredictionService" }, - "shortName": "ListTrainingPipelines" + "shortName": "StreamDirectRawPredict" }, "parameters": [ { - "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListTrainingPipelinesRequest" - }, - { - "name": "parent", - "type": "str" + "name": "requests", + "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamDirectRawPredictRequest]" }, { "name": "retry", @@ -33632,22 +38558,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.pipeline_service.pagers.ListTrainingPipelinesPager", - "shortName": "list_training_pipelines" + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamDirectRawPredictResponse]", + "shortName": "stream_direct_raw_predict" }, - "description": "Sample for ListTrainingPipelines", - "file": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_sync.py", + "description": "Sample for StreamDirectRawPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_stream_direct_raw_predict_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PipelineService_ListTrainingPipelines_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamDirectRawPredict_sync", "segments": [ { - "end": 52, + "end": 62, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 62, "start": 27, "type": "SHORT" }, @@ -33657,22 +38583,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 55, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 56, "type": 
"REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 63, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_pipeline_service_list_training_pipelines_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_stream_direct_raw_predict_sync.py" }, { "canonical": true, @@ -33682,27 +38608,27 @@ "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.count_tokens", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.stream_generate_content", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.CountTokens", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamGenerateContent", "service": { "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", "shortName": "PredictionService" }, - "shortName": "CountTokens" + "shortName": "StreamGenerateContent" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CountTokensRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GenerateContentRequest" }, { - "name": "endpoint", + "name": "model", "type": "str" }, { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + "name": "contents", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]" }, { "name": "retry", @@ -33717,22 +38643,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.CountTokensResponse", - "shortName": "count_tokens" + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.GenerateContentResponse]", + "shortName": "stream_generate_content" }, - "description": "Sample for CountTokens", - "file": "aiplatform_v1beta1_generated_prediction_service_count_tokens_async.py", + "description": "Sample for StreamGenerateContent", + "file": 
"aiplatform_v1beta1_generated_prediction_service_stream_generate_content_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_CountTokens_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamGenerateContent_async", "segments": [ { - "end": 60, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 56, "start": 27, "type": "SHORT" }, @@ -33742,22 +38668,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 55, + "end": 52, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 57, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_count_tokens_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_stream_generate_content_async.py" }, { "canonical": true, @@ -33766,27 +38692,27 @@ "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", "shortName": "PredictionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.count_tokens", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.stream_generate_content", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.CountTokens", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamGenerateContent", "service": { "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", "shortName": "PredictionService" }, - "shortName": "CountTokens" + "shortName": "StreamGenerateContent" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CountTokensRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GenerateContentRequest" }, { - "name": "endpoint", + "name": "model", "type": "str" }, { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" + "name": "contents", 
+ "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]" }, { "name": "retry", @@ -33801,22 +38727,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.CountTokensResponse", - "shortName": "count_tokens" + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.GenerateContentResponse]", + "shortName": "stream_generate_content" }, - "description": "Sample for CountTokens", - "file": "aiplatform_v1beta1_generated_prediction_service_count_tokens_sync.py", + "description": "Sample for StreamGenerateContent", + "file": "aiplatform_v1beta1_generated_prediction_service_stream_generate_content_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_CountTokens_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamGenerateContent_sync", "segments": [ { - "end": 60, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 56, "start": 27, "type": "SHORT" }, @@ -33826,22 +38752,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 54, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 57, - "start": 55, + "end": 52, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 57, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_count_tokens_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_stream_generate_content_sync.py" }, { "canonical": true, @@ -33851,19 +38777,19 @@ "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.direct_predict", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.streaming_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.DirectPredict", + "fullName": 
"google.cloud.aiplatform.v1beta1.PredictionService.StreamingPredict", "service": { "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", "shortName": "PredictionService" }, - "shortName": "DirectPredict" + "shortName": "StreamingPredict" }, "parameters": [ { - "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DirectPredictRequest" + "name": "requests", + "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamingPredictRequest]" }, { "name": "retry", @@ -33878,22 +38804,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.DirectPredictResponse", - "shortName": "direct_predict" + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingPredictResponse]", + "shortName": "streaming_predict" }, - "description": "Sample for DirectPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_direct_predict_async.py", + "description": "Sample for StreamingPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_streaming_predict_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_DirectPredict_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamingPredict_async", "segments": [ { - "end": 51, + "end": 62, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 62, "start": 27, "type": "SHORT" }, @@ -33903,22 +38829,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 55, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 56, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 63, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_direct_predict_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_streaming_predict_async.py" }, { "canonical": true, @@ -33927,19 +38853,19 @@ "fullName": 
"google.cloud.aiplatform_v1beta1.PredictionServiceClient", "shortName": "PredictionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.direct_predict", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.streaming_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.DirectPredict", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamingPredict", "service": { "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", "shortName": "PredictionService" }, - "shortName": "DirectPredict" + "shortName": "StreamingPredict" }, "parameters": [ { - "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DirectPredictRequest" + "name": "requests", + "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamingPredictRequest]" }, { "name": "retry", @@ -33954,22 +38880,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.DirectPredictResponse", - "shortName": "direct_predict" + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingPredictResponse]", + "shortName": "streaming_predict" }, - "description": "Sample for DirectPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_direct_predict_sync.py", + "description": "Sample for StreamingPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_streaming_predict_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_DirectPredict_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamingPredict_sync", "segments": [ { - "end": 51, + "end": 62, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 62, "start": 27, "type": "SHORT" }, @@ -33979,22 +38905,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 55, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 56, 
"type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 63, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_direct_predict_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_streaming_predict_sync.py" }, { "canonical": true, @@ -34004,19 +38930,19 @@ "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", "shortName": "PredictionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.direct_raw_predict", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.streaming_raw_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.DirectRawPredict", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamingRawPredict", "service": { "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", "shortName": "PredictionService" }, - "shortName": "DirectRawPredict" + "shortName": "StreamingRawPredict" }, "parameters": [ { - "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DirectRawPredictRequest" + "name": "requests", + "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamingRawPredictRequest]" }, { "name": "retry", @@ -34031,22 +38957,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.DirectRawPredictResponse", - "shortName": "direct_raw_predict" + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingRawPredictResponse]", + "shortName": "streaming_raw_predict" }, - "description": "Sample for DirectRawPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_direct_raw_predict_async.py", + "description": "Sample for StreamingRawPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_streaming_raw_predict_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_PredictionService_DirectRawPredict_async", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamingRawPredict_async", "segments": [ { - "end": 51, + "end": 62, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 62, "start": 27, "type": "SHORT" }, @@ -34056,22 +38982,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 55, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 56, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 63, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_direct_raw_predict_async.py" + "title": "aiplatform_v1beta1_generated_prediction_service_streaming_raw_predict_async.py" }, { "canonical": true, @@ -34080,19 +39006,19 @@ "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", "shortName": "PredictionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.direct_raw_predict", + "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.streaming_raw_predict", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.DirectRawPredict", + "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamingRawPredict", "service": { "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", "shortName": "PredictionService" }, - "shortName": "DirectRawPredict" + "shortName": "StreamingRawPredict" }, "parameters": [ { - "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DirectRawPredictRequest" + "name": "requests", + "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamingRawPredictRequest]" }, { "name": "retry", @@ -34107,22 +39033,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.DirectRawPredictResponse", - "shortName": "direct_raw_predict" + "resultType": 
"Iterable[google.cloud.aiplatform_v1beta1.types.StreamingRawPredictResponse]", + "shortName": "streaming_raw_predict" }, - "description": "Sample for DirectRawPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_direct_raw_predict_sync.py", + "description": "Sample for StreamingRawPredict", + "file": "aiplatform_v1beta1_generated_prediction_service_streaming_raw_predict_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_DirectRawPredict_sync", + "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamingRawPredict_sync", "segments": [ { - "end": 51, + "end": 62, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 62, "start": 27, "type": "SHORT" }, @@ -34132,60 +39058,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 55, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 58, + "start": 56, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 63, + "start": 59, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_direct_raw_predict_sync.py" + "title": "aiplatform_v1beta1_generated_prediction_service_streaming_raw_predict_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineExecutionServiceAsyncClient", + "shortName": "ReasoningEngineExecutionServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.explain", + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineExecutionServiceAsyncClient.query_reasoning_engine", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Explain", + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineExecutionService.QueryReasoningEngine", 
"service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineExecutionService", + "shortName": "ReasoningEngineExecutionService" }, - "shortName": "Explain" + "shortName": "QueryReasoningEngine" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ExplainRequest" - }, - { - "name": "endpoint", - "type": "str" - }, - { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" - }, - { - "name": "parameters", - "type": "google.protobuf.struct_pb2.Value" - }, - { - "name": "deployed_model_id", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.QueryReasoningEngineRequest" }, { "name": "retry", @@ -34200,22 +39110,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ExplainResponse", - "shortName": "explain" + "resultType": "google.cloud.aiplatform_v1beta1.types.QueryReasoningEngineResponse", + "shortName": "query_reasoning_engine" }, - "description": "Sample for Explain", - "file": "aiplatform_v1beta1_generated_prediction_service_explain_async.py", + "description": "Sample for QueryReasoningEngine", + "file": "aiplatform_v1beta1_generated_reasoning_engine_execution_service_query_reasoning_engine_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_Explain_async", + "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineExecutionService_QueryReasoningEngine_async", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -34225,59 +39135,43 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, 
+ "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_explain_async.py" + "title": "aiplatform_v1beta1_generated_reasoning_engine_execution_service_query_reasoning_engine_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", - "shortName": "PredictionServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineExecutionServiceClient", + "shortName": "ReasoningEngineExecutionServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.explain", + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineExecutionServiceClient.query_reasoning_engine", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Explain", + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineExecutionService.QueryReasoningEngine", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineExecutionService", + "shortName": "ReasoningEngineExecutionService" }, - "shortName": "Explain" + "shortName": "QueryReasoningEngine" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ExplainRequest" - }, - { - "name": "endpoint", - "type": "str" - }, - { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" - }, - { - "name": "parameters", - "type": "google.protobuf.struct_pb2.Value" - }, - { - "name": "deployed_model_id", - "type": "str" + "type": "google.cloud.aiplatform_v1beta1.types.QueryReasoningEngineRequest" }, { "name": "retry", @@ -34292,22 +39186,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ExplainResponse", - "shortName": "explain" + "resultType": "google.cloud.aiplatform_v1beta1.types.QueryReasoningEngineResponse", + "shortName": 
"query_reasoning_engine" }, - "description": "Sample for Explain", - "file": "aiplatform_v1beta1_generated_prediction_service_explain_sync.py", + "description": "Sample for QueryReasoningEngine", + "file": "aiplatform_v1beta1_generated_reasoning_engine_execution_service_query_reasoning_engine_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_Explain_sync", + "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineExecutionService_QueryReasoningEngine_sync", "segments": [ { - "end": 55, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 51, "start": 27, "type": "SHORT" }, @@ -34317,52 +39211,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_explain_sync.py" + "title": "aiplatform_v1beta1_generated_reasoning_engine_execution_service_query_reasoning_engine_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient", + "shortName": "ReasoningEngineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.generate_content", + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient.create_reasoning_engine", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.GenerateContent", + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.CreateReasoningEngine", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": 
"PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", + "shortName": "ReasoningEngineService" }, - "shortName": "GenerateContent" + "shortName": "CreateReasoningEngine" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GenerateContentRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateReasoningEngineRequest" }, { - "name": "model", + "name": "parent", "type": "str" }, { - "name": "contents", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]" + "name": "reasoning_engine", + "type": "google.cloud.aiplatform_v1beta1.types.ReasoningEngine" }, { "name": "retry", @@ -34377,22 +39271,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.GenerateContentResponse", - "shortName": "generate_content" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_reasoning_engine" }, - "description": "Sample for GenerateContent", - "file": "aiplatform_v1beta1_generated_prediction_service_generate_content_async.py", + "description": "Sample for CreateReasoningEngine", + "file": "aiplatform_v1beta1_generated_reasoning_engine_service_create_reasoning_engine_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_GenerateContent_async", + "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_CreateReasoningEngine_async", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -34407,46 +39301,46 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 56, "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_generate_content_async.py" + "title": 
"aiplatform_v1beta1_generated_reasoning_engine_service_create_reasoning_engine_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", - "shortName": "PredictionServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient", + "shortName": "ReasoningEngineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.generate_content", + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient.create_reasoning_engine", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.GenerateContent", + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.CreateReasoningEngine", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", + "shortName": "ReasoningEngineService" }, - "shortName": "GenerateContent" + "shortName": "CreateReasoningEngine" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GenerateContentRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateReasoningEngineRequest" }, { - "name": "model", + "name": "parent", "type": "str" }, { - "name": "contents", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]" + "name": "reasoning_engine", + "type": "google.cloud.aiplatform_v1beta1.types.ReasoningEngine" }, { "name": "retry", @@ -34461,22 +39355,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.GenerateContentResponse", - "shortName": "generate_content" + "resultType": "google.api_core.operation.Operation", + "shortName": "create_reasoning_engine" }, - "description": "Sample for GenerateContent", - "file": "aiplatform_v1beta1_generated_prediction_service_generate_content_sync.py", + "description": "Sample for 
CreateReasoningEngine", + "file": "aiplatform_v1beta1_generated_reasoning_engine_service_create_reasoning_engine_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_GenerateContent_sync", + "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_CreateReasoningEngine_sync", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -34491,52 +39385,44 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 56, "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_generate_content_sync.py" + "title": "aiplatform_v1beta1_generated_reasoning_engine_service_create_reasoning_engine_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient", + "shortName": "ReasoningEngineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.predict", + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient.delete_reasoning_engine", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Predict", + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.DeleteReasoningEngine", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", + "shortName": "ReasoningEngineService" }, - "shortName": "Predict" + "shortName": "DeleteReasoningEngine" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.PredictRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteReasoningEngineRequest" }, { - "name": "endpoint", + "name": "name", "type": "str" }, - { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" - }, - { - "name": "parameters", - "type": "google.protobuf.struct_pb2.Value" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -34550,14 +39436,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.PredictResponse", - "shortName": "predict" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_reasoning_engine" }, - "description": "Sample for Predict", - "file": "aiplatform_v1beta1_generated_prediction_service_predict_async.py", + "description": "Sample for DeleteReasoningEngine", + "file": "aiplatform_v1beta1_generated_reasoning_engine_service_delete_reasoning_engine_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_Predict_async", + "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_DeleteReasoningEngine_async", "segments": [ { "end": 55, @@ -34575,13 +39461,13 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { "end": 52, - "start": 50, + "start": 46, "type": "REQUEST_EXECUTION" }, { @@ -34590,41 +39476,33 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_predict_async.py" + "title": "aiplatform_v1beta1_generated_reasoning_engine_service_delete_reasoning_engine_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", - "shortName": "PredictionServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient", + "shortName": "ReasoningEngineServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.PredictionServiceClient.predict", + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient.delete_reasoning_engine", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.Predict", + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.DeleteReasoningEngine", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", + "shortName": "ReasoningEngineService" }, - "shortName": "Predict" + "shortName": "DeleteReasoningEngine" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.PredictRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteReasoningEngineRequest" }, { - "name": "endpoint", + "name": "name", "type": "str" }, - { - "name": "instances", - "type": "MutableSequence[google.protobuf.struct_pb2.Value]" - }, - { - "name": "parameters", - "type": "google.protobuf.struct_pb2.Value" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -34638,14 +39516,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.PredictResponse", - "shortName": "predict" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_reasoning_engine" }, - "description": "Sample for Predict", - "file": "aiplatform_v1beta1_generated_prediction_service_predict_sync.py", + "description": "Sample for DeleteReasoningEngine", + "file": "aiplatform_v1beta1_generated_reasoning_engine_service_delete_reasoning_engine_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_Predict_sync", + "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_DeleteReasoningEngine_sync", "segments": [ { "end": 55, @@ -34663,13 +39541,13 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 
41, "type": "REQUEST_INITIALIZATION" }, { "end": 52, - "start": 50, + "start": 46, "type": "REQUEST_EXECUTION" }, { @@ -34678,38 +39556,34 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_predict_sync.py" + "title": "aiplatform_v1beta1_generated_reasoning_engine_service_delete_reasoning_engine_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient", + "shortName": "ReasoningEngineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.raw_predict", + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient.get_reasoning_engine", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.RawPredict", + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.GetReasoningEngine", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", + "shortName": "ReasoningEngineService" }, - "shortName": "RawPredict" + "shortName": "GetReasoningEngine" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.RawPredictRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetReasoningEngineRequest" }, { - "name": "endpoint", + "name": "name", "type": "str" }, - { - "name": "http_body", - "type": "google.api.httpbody_pb2.HttpBody" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -34723,14 +39597,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api.httpbody_pb2.HttpBody", - "shortName": "raw_predict" + "resultType": "google.cloud.aiplatform_v1beta1.types.ReasoningEngine", + "shortName": "get_reasoning_engine" }, - 
"description": "Sample for RawPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_raw_predict_async.py", + "description": "Sample for GetReasoningEngine", + "file": "aiplatform_v1beta1_generated_reasoning_engine_service_get_reasoning_engine_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_RawPredict_async", + "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_GetReasoningEngine_async", "segments": [ { "end": 51, @@ -34763,37 +39637,33 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_raw_predict_async.py" + "title": "aiplatform_v1beta1_generated_reasoning_engine_service_get_reasoning_engine_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", - "shortName": "PredictionServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient", + "shortName": "ReasoningEngineServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.raw_predict", + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient.get_reasoning_engine", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.RawPredict", + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.GetReasoningEngine", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", + "shortName": "ReasoningEngineService" }, - "shortName": "RawPredict" + "shortName": "GetReasoningEngine" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.RawPredictRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetReasoningEngineRequest" }, { - "name": "endpoint", + "name": "name", "type": "str" }, - { - "name": "http_body", - "type": 
"google.api.httpbody_pb2.HttpBody" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -34807,14 +39677,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api.httpbody_pb2.HttpBody", - "shortName": "raw_predict" + "resultType": "google.cloud.aiplatform_v1beta1.types.ReasoningEngine", + "shortName": "get_reasoning_engine" }, - "description": "Sample for RawPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_raw_predict_sync.py", + "description": "Sample for GetReasoningEngine", + "file": "aiplatform_v1beta1_generated_reasoning_engine_service_get_reasoning_engine_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_RawPredict_sync", + "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_GetReasoningEngine_sync", "segments": [ { "end": 51, @@ -34847,29 +39717,33 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_raw_predict_sync.py" + "title": "aiplatform_v1beta1_generated_reasoning_engine_service_get_reasoning_engine_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient", + "shortName": "ReasoningEngineServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.server_streaming_predict", + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient.list_reasoning_engines", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.ServerStreamingPredict", + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.ListReasoningEngines", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": 
"google.cloud.aiplatform.v1beta1.ReasoningEngineService", + "shortName": "ReasoningEngineService" }, - "shortName": "ServerStreamingPredict" + "shortName": "ListReasoningEngines" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.StreamingPredictRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListReasoningEnginesRequest" + }, + { + "name": "parent", + "type": "str" }, { "name": "retry", @@ -34884,14 +39758,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingPredictResponse]", - "shortName": "server_streaming_predict" + "resultType": "google.cloud.aiplatform_v1beta1.services.reasoning_engine_service.pagers.ListReasoningEnginesAsyncPager", + "shortName": "list_reasoning_engines" }, - "description": "Sample for ServerStreamingPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_server_streaming_predict_async.py", + "description": "Sample for ListReasoningEngines", + "file": "aiplatform_v1beta1_generated_reasoning_engine_service_list_reasoning_engines_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_ServerStreamingPredict_async", + "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_ListReasoningEngines_async", "segments": [ { "end": 52, @@ -34924,28 +39798,32 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_server_streaming_predict_async.py" + "title": "aiplatform_v1beta1_generated_reasoning_engine_service_list_reasoning_engines_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", - "shortName": "PredictionServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient", + "shortName": "ReasoningEngineServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.PredictionServiceClient.server_streaming_predict", + "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient.list_reasoning_engines", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.ServerStreamingPredict", + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.ListReasoningEngines", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", + "shortName": "ReasoningEngineService" }, - "shortName": "ServerStreamingPredict" + "shortName": "ListReasoningEngines" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.StreamingPredictRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListReasoningEnginesRequest" + }, + { + "name": "parent", + "type": "str" }, { "name": "retry", @@ -34960,14 +39838,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingPredictResponse]", - "shortName": "server_streaming_predict" + "resultType": "google.cloud.aiplatform_v1beta1.services.reasoning_engine_service.pagers.ListReasoningEnginesPager", + "shortName": "list_reasoning_engines" }, - "description": "Sample for ServerStreamingPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_server_streaming_predict_sync.py", + "description": "Sample for ListReasoningEngines", + "file": "aiplatform_v1beta1_generated_reasoning_engine_service_list_reasoning_engines_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_ServerStreamingPredict_sync", + "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_ListReasoningEngines_sync", "segments": [ { "end": 52, @@ -35000,29 +39878,37 @@ "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_prediction_service_server_streaming_predict_sync.py" + "title": "aiplatform_v1beta1_generated_reasoning_engine_service_list_reasoning_engines_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", + "shortName": "ScheduleServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.stream_direct_predict", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.create_schedule", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamDirectPredict", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.CreateSchedule", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "StreamDirectPredict" + "shortName": "CreateSchedule" }, "parameters": [ { - "name": "requests", - "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamDirectPredictRequest]" + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateScheduleRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "schedule", + "type": "google.cloud.aiplatform_v1beta1.types.Schedule" }, { "name": "retry", @@ -35037,22 +39923,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamDirectPredictResponse]", - "shortName": "stream_direct_predict" + "resultType": "google.cloud.aiplatform_v1beta1.types.Schedule", + "shortName": "create_schedule" }, - "description": "Sample for StreamDirectPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_stream_direct_predict_async.py", + "description": 
"Sample for CreateSchedule", + "file": "aiplatform_v1beta1_generated_schedule_service_create_schedule_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamDirectPredict_async", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_CreateSchedule_async", "segments": [ { - "end": 62, + "end": 58, "start": 27, "type": "FULL" }, { - "end": 62, + "end": 58, "start": 27, "type": "SHORT" }, @@ -35062,43 +39948,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 55, + "end": 52, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 56, + "end": 55, + "start": 53, "type": "REQUEST_EXECUTION" }, { - "end": 63, - "start": 59, + "end": 59, + "start": 56, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_stream_direct_predict_async.py" + "title": "aiplatform_v1beta1_generated_schedule_service_create_schedule_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", - "shortName": "PredictionServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", + "shortName": "ScheduleServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.stream_direct_predict", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.create_schedule", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamDirectPredict", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.CreateSchedule", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "StreamDirectPredict" + "shortName": "CreateSchedule" }, "parameters": [ { - "name": "requests", - "type": 
"Iterator[google.cloud.aiplatform_v1beta1.types.StreamDirectPredictRequest]" + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.CreateScheduleRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "schedule", + "type": "google.cloud.aiplatform_v1beta1.types.Schedule" }, { "name": "retry", @@ -35113,22 +40007,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamDirectPredictResponse]", - "shortName": "stream_direct_predict" + "resultType": "google.cloud.aiplatform_v1beta1.types.Schedule", + "shortName": "create_schedule" }, - "description": "Sample for StreamDirectPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_stream_direct_predict_sync.py", + "description": "Sample for CreateSchedule", + "file": "aiplatform_v1beta1_generated_schedule_service_create_schedule_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamDirectPredict_sync", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_CreateSchedule_sync", "segments": [ { - "end": 62, + "end": 58, "start": 27, "type": "FULL" }, { - "end": 62, + "end": 58, "start": 27, "type": "SHORT" }, @@ -35138,44 +40032,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 55, + "end": 52, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 56, + "end": 55, + "start": 53, "type": "REQUEST_EXECUTION" }, { - "end": 63, - "start": 59, + "end": 59, + "start": 56, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_stream_direct_predict_sync.py" + "title": "aiplatform_v1beta1_generated_schedule_service_create_schedule_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", + "shortName": "ScheduleServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.stream_direct_raw_predict", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.delete_schedule", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamDirectRawPredict", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.DeleteSchedule", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "StreamDirectRawPredict" + "shortName": "DeleteSchedule" }, "parameters": [ { - "name": "requests", - "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamDirectRawPredictRequest]" + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteScheduleRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -35190,22 +40088,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamDirectRawPredictResponse]", - "shortName": "stream_direct_raw_predict" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_schedule" }, - "description": "Sample for StreamDirectRawPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_stream_direct_raw_predict_async.py", + "description": "Sample for DeleteSchedule", + "file": "aiplatform_v1beta1_generated_schedule_service_delete_schedule_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamDirectRawPredict_async", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_DeleteSchedule_async", "segments": [ { - "end": 62, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 62, + "end": 55, "start": 27, 
"type": "SHORT" }, @@ -35215,43 +40113,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 55, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 56, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 63, - "start": 59, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_stream_direct_raw_predict_async.py" + "title": "aiplatform_v1beta1_generated_schedule_service_delete_schedule_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", - "shortName": "PredictionServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", + "shortName": "ScheduleServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.stream_direct_raw_predict", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.delete_schedule", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamDirectRawPredict", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.DeleteSchedule", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "StreamDirectRawPredict" + "shortName": "DeleteSchedule" }, "parameters": [ { - "name": "requests", - "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamDirectRawPredictRequest]" + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.DeleteScheduleRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -35266,22 +40168,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamDirectRawPredictResponse]", - "shortName": "stream_direct_raw_predict" + "resultType": 
"google.api_core.operation.Operation", + "shortName": "delete_schedule" }, - "description": "Sample for StreamDirectRawPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_stream_direct_raw_predict_sync.py", + "description": "Sample for DeleteSchedule", + "file": "aiplatform_v1beta1_generated_schedule_service_delete_schedule_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamDirectRawPredict_sync", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_DeleteSchedule_sync", "segments": [ { - "end": 62, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 62, + "end": 55, "start": 27, "type": "SHORT" }, @@ -35291,53 +40193,49 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 55, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 56, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 63, - "start": 59, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_stream_direct_raw_predict_sync.py" + "title": "aiplatform_v1beta1_generated_schedule_service_delete_schedule_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", + "shortName": "ScheduleServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.stream_generate_content", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.get_schedule", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamGenerateContent", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.GetSchedule", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": 
"PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "StreamGenerateContent" + "shortName": "GetSchedule" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GenerateContentRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetScheduleRequest" }, { - "name": "model", + "name": "name", "type": "str" }, - { - "name": "contents", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -35351,22 +40249,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.GenerateContentResponse]", - "shortName": "stream_generate_content" + "resultType": "google.cloud.aiplatform_v1beta1.types.Schedule", + "shortName": "get_schedule" }, - "description": "Sample for StreamGenerateContent", - "file": "aiplatform_v1beta1_generated_prediction_service_stream_generate_content_async.py", + "description": "Sample for GetSchedule", + "file": "aiplatform_v1beta1_generated_schedule_service_get_schedule_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamGenerateContent_async", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_GetSchedule_async", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -35376,52 +40274,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_stream_generate_content_async.py" + "title": 
"aiplatform_v1beta1_generated_schedule_service_get_schedule_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", - "shortName": "PredictionServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", + "shortName": "ScheduleServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.stream_generate_content", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.get_schedule", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamGenerateContent", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.GetSchedule", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "StreamGenerateContent" + "shortName": "GetSchedule" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GenerateContentRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetScheduleRequest" }, { - "name": "model", + "name": "name", "type": "str" }, - { - "name": "contents", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.Content]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -35435,22 +40329,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.GenerateContentResponse]", - "shortName": "stream_generate_content" + "resultType": "google.cloud.aiplatform_v1beta1.types.Schedule", + "shortName": "get_schedule" }, - "description": "Sample for StreamGenerateContent", - "file": "aiplatform_v1beta1_generated_prediction_service_stream_generate_content_sync.py", + "description": "Sample for GetSchedule", + "file": "aiplatform_v1beta1_generated_schedule_service_get_schedule_sync.py", "language": "PYTHON", "origin": 
"API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamGenerateContent_sync", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_GetSchedule_sync", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -35460,44 +40354,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 53, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_stream_generate_content_sync.py" + "title": "aiplatform_v1beta1_generated_schedule_service_get_schedule_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", + "shortName": "ScheduleServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.streaming_predict", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.list_schedules", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamingPredict", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.ListSchedules", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "StreamingPredict" + "shortName": "ListSchedules" }, "parameters": [ { - "name": "requests", - "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamingPredictRequest]" + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListSchedulesRequest" + }, + 
{ + "name": "parent", + "type": "str" }, { "name": "retry", @@ -35512,22 +40410,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingPredictResponse]", - "shortName": "streaming_predict" + "resultType": "google.cloud.aiplatform_v1beta1.services.schedule_service.pagers.ListSchedulesAsyncPager", + "shortName": "list_schedules" }, - "description": "Sample for StreamingPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_streaming_predict_async.py", + "description": "Sample for ListSchedules", + "file": "aiplatform_v1beta1_generated_schedule_service_list_schedules_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamingPredict_async", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_ListSchedules_async", "segments": [ { - "end": 62, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 62, + "end": 52, "start": 27, "type": "SHORT" }, @@ -35537,43 +40435,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 55, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 56, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 63, - "start": 59, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_streaming_predict_async.py" + "title": "aiplatform_v1beta1_generated_schedule_service_list_schedules_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", - "shortName": "PredictionServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", + "shortName": "ScheduleServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.streaming_predict", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.list_schedules", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.PredictionService.StreamingPredict", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.ListSchedules", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "StreamingPredict" + "shortName": "ListSchedules" }, "parameters": [ { - "name": "requests", - "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamingPredictRequest]" + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.ListSchedulesRequest" + }, + { + "name": "parent", + "type": "str" }, { "name": "retry", @@ -35588,22 +40490,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingPredictResponse]", - "shortName": "streaming_predict" + "resultType": "google.cloud.aiplatform_v1beta1.services.schedule_service.pagers.ListSchedulesPager", + "shortName": "list_schedules" }, - "description": "Sample for StreamingPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_streaming_predict_sync.py", + "description": "Sample for ListSchedules", + "file": "aiplatform_v1beta1_generated_schedule_service_list_schedules_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamingPredict_sync", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_ListSchedules_sync", "segments": [ { - "end": 62, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 62, + "end": 52, "start": 27, "type": "SHORT" }, @@ -35613,44 +40515,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 55, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 56, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 63, - "start": 59, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_prediction_service_streaming_predict_sync.py" + "title": "aiplatform_v1beta1_generated_schedule_service_list_schedules_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient", - "shortName": "PredictionServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", + "shortName": "ScheduleServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceAsyncClient.streaming_raw_predict", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.pause_schedule", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamingRawPredict", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.PauseSchedule", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "StreamingRawPredict" + "shortName": "PauseSchedule" }, "parameters": [ { - "name": "requests", - "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamingRawPredictRequest]" + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PauseScheduleRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -35665,22 +40571,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingRawPredictResponse]", - "shortName": "streaming_raw_predict" + "shortName": "pause_schedule" }, - "description": "Sample for StreamingRawPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_streaming_raw_predict_async.py", + "description": "Sample for PauseSchedule", + "file": "aiplatform_v1beta1_generated_schedule_service_pause_schedule_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_PredictionService_StreamingRawPredict_async", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_PauseSchedule_async", "segments": [ { - "end": 62, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 62, + "end": 49, "start": 27, "type": "SHORT" }, @@ -35690,43 +40595,45 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 55, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 56, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 63, - "start": 59, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_streaming_raw_predict_async.py" + "title": "aiplatform_v1beta1_generated_schedule_service_pause_schedule_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient", - "shortName": "PredictionServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", + "shortName": "ScheduleServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.PredictionServiceClient.streaming_raw_predict", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.pause_schedule", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService.StreamingRawPredict", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.PauseSchedule", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.PredictionService", - "shortName": "PredictionService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "StreamingRawPredict" + "shortName": "PauseSchedule" }, "parameters": [ { - "name": "requests", - "type": "Iterator[google.cloud.aiplatform_v1beta1.types.StreamingRawPredictRequest]" + "name": "request", + "type": "google.cloud.aiplatform_v1beta1.types.PauseScheduleRequest" + }, + { + "name": "name", + "type": "str" }, { "name": "retry", @@ -35741,22 +40648,21 @@ 
"type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.StreamingRawPredictResponse]", - "shortName": "streaming_raw_predict" + "shortName": "pause_schedule" }, - "description": "Sample for StreamingRawPredict", - "file": "aiplatform_v1beta1_generated_prediction_service_streaming_raw_predict_sync.py", + "description": "Sample for PauseSchedule", + "file": "aiplatform_v1beta1_generated_schedule_service_pause_schedule_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_PredictionService_StreamingRawPredict_sync", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_PauseSchedule_sync", "segments": [ { - "end": 62, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 62, + "end": 49, "start": 27, "type": "SHORT" }, @@ -35766,44 +40672,50 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 55, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 58, - "start": 56, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 63, - "start": 59, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_prediction_service_streaming_raw_predict_sync.py" + "title": "aiplatform_v1beta1_generated_schedule_service_pause_schedule_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineExecutionServiceAsyncClient", - "shortName": "ReasoningEngineExecutionServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", + "shortName": "ScheduleServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineExecutionServiceAsyncClient.query_reasoning_engine", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.resume_schedule", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineExecutionService.QueryReasoningEngine", + "fullName": 
"google.cloud.aiplatform.v1beta1.ScheduleService.ResumeSchedule", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineExecutionService", - "shortName": "ReasoningEngineExecutionService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "QueryReasoningEngine" + "shortName": "ResumeSchedule" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.QueryReasoningEngineRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ResumeScheduleRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "catch_up", + "type": "bool" }, { "name": "retry", @@ -35818,22 +40730,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.QueryReasoningEngineResponse", - "shortName": "query_reasoning_engine" + "shortName": "resume_schedule" }, - "description": "Sample for QueryReasoningEngine", - "file": "aiplatform_v1beta1_generated_reasoning_engine_execution_service_query_reasoning_engine_async.py", + "description": "Sample for ResumeSchedule", + "file": "aiplatform_v1beta1_generated_schedule_service_resume_schedule_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineExecutionService_QueryReasoningEngine_async", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_ResumeSchedule_async", "segments": [ { - "end": 51, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 49, "start": 27, "type": "SHORT" }, @@ -35848,38 +40759,44 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_reasoning_engine_execution_service_query_reasoning_engine_async.py" + "title": "aiplatform_v1beta1_generated_schedule_service_resume_schedule_async.py" }, { "canonical": true, 
"clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineExecutionServiceClient", - "shortName": "ReasoningEngineExecutionServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", + "shortName": "ScheduleServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineExecutionServiceClient.query_reasoning_engine", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.resume_schedule", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineExecutionService.QueryReasoningEngine", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.ResumeSchedule", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineExecutionService", - "shortName": "ReasoningEngineExecutionService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "QueryReasoningEngine" + "shortName": "ResumeSchedule" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.QueryReasoningEngineRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ResumeScheduleRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "catch_up", + "type": "bool" }, { "name": "retry", @@ -35894,22 +40811,21 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.QueryReasoningEngineResponse", - "shortName": "query_reasoning_engine" + "shortName": "resume_schedule" }, - "description": "Sample for QueryReasoningEngine", - "file": "aiplatform_v1beta1_generated_reasoning_engine_execution_service_query_reasoning_engine_sync.py", + "description": "Sample for ResumeSchedule", + "file": "aiplatform_v1beta1_generated_schedule_service_resume_schedule_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineExecutionService_QueryReasoningEngine_sync", + "regionTag": 
"aiplatform_v1beta1_generated_ScheduleService_ResumeSchedule_sync", "segments": [ { - "end": 51, + "end": 49, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 49, "start": 27, "type": "SHORT" }, @@ -35924,47 +40840,45 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_reasoning_engine_execution_service_query_reasoning_engine_sync.py" + "title": "aiplatform_v1beta1_generated_schedule_service_resume_schedule_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient", - "shortName": "ReasoningEngineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", + "shortName": "ScheduleServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient.create_reasoning_engine", + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.update_schedule", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.CreateReasoningEngine", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.UpdateSchedule", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", - "shortName": "ReasoningEngineService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "CreateReasoningEngine" + "shortName": "UpdateSchedule" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateReasoningEngineRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateScheduleRequest" }, { - "name": "parent", - "type": "str" + "name": "schedule", + "type": "google.cloud.aiplatform_v1beta1.types.Schedule" }, { - "name": "reasoning_engine", - "type": 
"google.cloud.aiplatform_v1beta1.types.ReasoningEngine" + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -35979,22 +40893,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_reasoning_engine" + "resultType": "google.cloud.aiplatform_v1beta1.types.Schedule", + "shortName": "update_schedule" }, - "description": "Sample for CreateReasoningEngine", - "file": "aiplatform_v1beta1_generated_reasoning_engine_service_create_reasoning_engine_async.py", + "description": "Sample for UpdateSchedule", + "file": "aiplatform_v1beta1_generated_schedule_service_update_schedule_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_CreateReasoningEngine_async", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_UpdateSchedule_async", "segments": [ { - "end": 59, + "end": 57, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 57, "start": 27, "type": "SHORT" }, @@ -36004,51 +40918,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 54, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 58, + "start": 55, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_reasoning_engine_service_create_reasoning_engine_async.py" + "title": "aiplatform_v1beta1_generated_schedule_service_update_schedule_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient", - "shortName": "ReasoningEngineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", + "shortName": "ScheduleServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient.create_reasoning_engine", + "fullName": 
"google.cloud.aiplatform_v1beta1.ScheduleServiceClient.update_schedule", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.CreateReasoningEngine", + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.UpdateSchedule", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", - "shortName": "ReasoningEngineService" + "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", + "shortName": "ScheduleService" }, - "shortName": "CreateReasoningEngine" + "shortName": "UpdateSchedule" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateReasoningEngineRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateScheduleRequest" }, { - "name": "parent", - "type": "str" + "name": "schedule", + "type": "google.cloud.aiplatform_v1beta1.types.Schedule" }, { - "name": "reasoning_engine", - "type": "google.cloud.aiplatform_v1beta1.types.ReasoningEngine" + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -36063,22 +40977,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_reasoning_engine" + "resultType": "google.cloud.aiplatform_v1beta1.types.Schedule", + "shortName": "update_schedule" }, - "description": "Sample for CreateReasoningEngine", - "file": "aiplatform_v1beta1_generated_reasoning_engine_service_create_reasoning_engine_sync.py", + "description": "Sample for UpdateSchedule", + "file": "aiplatform_v1beta1_generated_schedule_service_update_schedule_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_CreateReasoningEngine_sync", + "regionTag": "aiplatform_v1beta1_generated_ScheduleService_UpdateSchedule_sync", "segments": [ { - "end": 59, + "end": 57, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 57, "start": 27, "type": "SHORT" }, @@ -36088,49 
+41002,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 54, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 58, + "start": 55, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_reasoning_engine_service_create_reasoning_engine_sync.py" + "title": "aiplatform_v1beta1_generated_schedule_service_update_schedule_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient", - "shortName": "ReasoningEngineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient.delete_reasoning_engine", + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.create_specialist_pool", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.DeleteReasoningEngine", + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", - "shortName": "ReasoningEngineService" + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" }, - "shortName": "DeleteReasoningEngine" + "shortName": "CreateSpecialistPool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteReasoningEngineRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -36145,21 +41063,21 @@ } ], 
"resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_reasoning_engine" + "shortName": "create_specialist_pool" }, - "description": "Sample for DeleteReasoningEngine", - "file": "aiplatform_v1beta1_generated_reasoning_engine_service_delete_reasoning_engine_async.py", + "description": "Sample for CreateSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_DeleteReasoningEngine_async", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_CreateSpecialistPool_async", "segments": [ { - "end": 55, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 60, "start": 27, "type": "SHORT" }, @@ -36169,48 +41087,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_reasoning_engine_service_delete_reasoning_engine_async.py" + "title": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient", - "shortName": "ReasoningEngineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient.delete_reasoning_engine", + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.create_specialist_pool", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.DeleteReasoningEngine", + "fullName": 
"google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", - "shortName": "ReasoningEngineService" + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" }, - "shortName": "DeleteReasoningEngine" + "shortName": "CreateSpecialistPool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteReasoningEngineRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -36225,21 +41147,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_reasoning_engine" + "shortName": "create_specialist_pool" }, - "description": "Sample for DeleteReasoningEngine", - "file": "aiplatform_v1beta1_generated_reasoning_engine_service_delete_reasoning_engine_sync.py", + "description": "Sample for CreateSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_DeleteReasoningEngine_sync", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_CreateSpecialistPool_sync", "segments": [ { - "end": 55, + "end": 60, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 60, "start": 27, "type": "SHORT" }, @@ -36249,44 +41171,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 57, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 61, + "start": 58, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_reasoning_engine_service_delete_reasoning_engine_sync.py" + "title": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient", - "shortName": "ReasoningEngineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient.get_reasoning_engine", + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.delete_specialist_pool", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.GetReasoningEngine", + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", - "shortName": "ReasoningEngineService" + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" }, - "shortName": "GetReasoningEngine" + "shortName": "DeleteSpecialistPool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetReasoningEngineRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest" }, { "name": "name", @@ -36305,22 +41227,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ReasoningEngine", - "shortName": "get_reasoning_engine" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_specialist_pool" }, - "description": "Sample for GetReasoningEngine", - "file": "aiplatform_v1beta1_generated_reasoning_engine_service_get_reasoning_engine_async.py", + "description": "Sample for DeleteSpecialistPool", + "file": 
"aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_GetReasoningEngine_async", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -36335,38 +41257,38 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_reasoning_engine_service_get_reasoning_engine_async.py" + "title": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient", - "shortName": "ReasoningEngineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient.get_reasoning_engine", + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.delete_specialist_pool", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.GetReasoningEngine", + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", - "shortName": "ReasoningEngineService" + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" }, - "shortName": "GetReasoningEngine" + "shortName": "DeleteSpecialistPool" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.GetReasoningEngineRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest" }, { "name": "name", @@ -36385,22 +41307,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ReasoningEngine", - "shortName": "get_reasoning_engine" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_specialist_pool" }, - "description": "Sample for GetReasoningEngine", - "file": "aiplatform_v1beta1_generated_reasoning_engine_service_get_reasoning_engine_sync.py", + "description": "Sample for DeleteSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_GetReasoningEngine_sync", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -36415,42 +41337,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_reasoning_engine_service_get_reasoning_engine_sync.py" + "title": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient", - "shortName": "ReasoningEngineServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceAsyncClient.list_reasoning_engines", + "fullName": 
"google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.get_specialist_pool", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.ListReasoningEngines", + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", - "shortName": "ReasoningEngineService" + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" }, - "shortName": "ListReasoningEngines" + "shortName": "GetSpecialistPool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListReasoningEnginesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -36466,22 +41388,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.reasoning_engine_service.pagers.ListReasoningEnginesAsyncPager", - "shortName": "list_reasoning_engines" + "resultType": "google.cloud.aiplatform_v1beta1.types.SpecialistPool", + "shortName": "get_specialist_pool" }, - "description": "Sample for ListReasoningEngines", - "file": "aiplatform_v1beta1_generated_reasoning_engine_service_list_reasoning_engines_async.py", + "description": "Sample for GetSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_ListReasoningEngines_async", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_GetSpecialistPool_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -36501,36 +41423,36 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_reasoning_engine_service_list_reasoning_engines_async.py" + "title": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient", - "shortName": "ReasoningEngineServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ReasoningEngineServiceClient.list_reasoning_engines", + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.get_specialist_pool", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService.ListReasoningEngines", + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ReasoningEngineService", - "shortName": "ReasoningEngineService" + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" }, - "shortName": "ListReasoningEngines" + "shortName": "GetSpecialistPool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListReasoningEnginesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -36546,22 +41468,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.reasoning_engine_service.pagers.ListReasoningEnginesPager", - "shortName": "list_reasoning_engines" + "resultType": "google.cloud.aiplatform_v1beta1.types.SpecialistPool", + "shortName": "get_specialist_pool" }, - "description": "Sample for ListReasoningEngines", - "file": "aiplatform_v1beta1_generated_reasoning_engine_service_list_reasoning_engines_sync.py", + "description": "Sample for GetSpecialistPool", + "file": 
"aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ReasoningEngineService_ListReasoningEngines_sync", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_GetSpecialistPool_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -36581,43 +41503,39 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_reasoning_engine_service_list_reasoning_engines_sync.py" + "title": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", - "shortName": "ScheduleServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.create_schedule", + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.list_specialist_pools", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.CreateSchedule", + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" }, - "shortName": "CreateSchedule" + "shortName": "ListSpecialistPools" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateScheduleRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest" }, { "name": "parent", "type": "str" 
}, - { - "name": "schedule", - "type": "google.cloud.aiplatform_v1beta1.types.Schedule" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -36631,22 +41549,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Schedule", - "shortName": "create_schedule" + "resultType": "google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager", + "shortName": "list_specialist_pools" }, - "description": "Sample for CreateSchedule", - "file": "aiplatform_v1beta1_generated_schedule_service_create_schedule_async.py", + "description": "Sample for ListSpecialistPools", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ScheduleService_CreateSchedule_async", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_ListSpecialistPools_async", "segments": [ { - "end": 58, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 52, "start": 27, "type": "SHORT" }, @@ -36656,52 +41574,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 52, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 53, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_create_schedule_async.py" + "title": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", - "shortName": "ScheduleServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.create_schedule", + 
"fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.list_specialist_pools", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.CreateSchedule", + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" }, - "shortName": "CreateSchedule" + "shortName": "ListSpecialistPools" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateScheduleRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "schedule", - "type": "google.cloud.aiplatform_v1beta1.types.Schedule" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -36715,22 +41629,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Schedule", - "shortName": "create_schedule" + "resultType": "google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager", + "shortName": "list_specialist_pools" }, - "description": "Sample for CreateSchedule", - "file": "aiplatform_v1beta1_generated_schedule_service_create_schedule_sync.py", + "description": "Sample for ListSpecialistPools", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ScheduleService_CreateSchedule_sync", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_ListSpecialistPools_sync", "segments": [ { - "end": 58, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 52, "start": 27, "type": "SHORT" }, @@ -36740,48 +41654,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 52, + "end": 45, 
"start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 53, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_create_schedule_sync.py" + "title": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", - "shortName": "ScheduleServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", + "shortName": "SpecialistPoolServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.delete_schedule", + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.update_specialist_pool", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.DeleteSchedule", + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" }, - "shortName": "DeleteSchedule" + "shortName": "UpdateSpecialistPool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteScheduleRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest" }, { - "name": "name", - "type": "str" + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -36797,21 +41715,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_schedule" + "shortName": 
"update_specialist_pool" }, - "description": "Sample for DeleteSchedule", - "file": "aiplatform_v1beta1_generated_schedule_service_delete_schedule_async.py", + "description": "Sample for UpdateSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ScheduleService_DeleteSchedule_async", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_UpdateSpecialistPool_async", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -36821,47 +41739,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_delete_schedule_async.py" + "title": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", - "shortName": "ScheduleServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", + "shortName": "SpecialistPoolServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.delete_schedule", + "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.update_specialist_pool", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.DeleteSchedule", + "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": 
"google.cloud.aiplatform.v1beta1.SpecialistPoolService", + "shortName": "SpecialistPoolService" }, - "shortName": "DeleteSchedule" + "shortName": "UpdateSpecialistPool" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteScheduleRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest" }, { - "name": "name", - "type": "str" + "name": "specialist_pool", + "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -36877,21 +41799,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_schedule" + "shortName": "update_specialist_pool" }, - "description": "Sample for DeleteSchedule", - "file": "aiplatform_v1beta1_generated_schedule_service_delete_schedule_sync.py", + "description": "Sample for UpdateSpecialistPool", + "file": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ScheduleService_DeleteSchedule_sync", + "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_UpdateSpecialistPool_sync", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -36901,49 +41823,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_delete_schedule_sync.py" + "title": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", - "shortName": "ScheduleServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.get_schedule", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.batch_create_tensorboard_runs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.GetSchedule", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "GetSchedule" + "shortName": "BatchCreateTensorboardRuns" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetScheduleRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "requests", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -36957,22 +41883,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Schedule", - "shortName": "get_schedule" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse", + "shortName": "batch_create_tensorboard_runs" }, - "description": "Sample for GetSchedule", - "file": "aiplatform_v1beta1_generated_schedule_service_get_schedule_async.py", + "description": "Sample for BatchCreateTensorboardRuns", + "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_ScheduleService_GetSchedule_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardRuns_async", "segments": [ { - "end": 51, + "end": 57, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 57, "start": 27, "type": "SHORT" }, @@ -36982,48 +41908,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 54, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 58, + "start": 55, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_get_schedule_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", - "shortName": "ScheduleServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.get_schedule", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.batch_create_tensorboard_runs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.GetSchedule", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "GetSchedule" + "shortName": "BatchCreateTensorboardRuns" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetScheduleRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + 
{ + "name": "requests", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -37037,22 +41967,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Schedule", - "shortName": "get_schedule" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse", + "shortName": "batch_create_tensorboard_runs" }, - "description": "Sample for GetSchedule", - "file": "aiplatform_v1beta1_generated_schedule_service_get_schedule_sync.py", + "description": "Sample for BatchCreateTensorboardRuns", + "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ScheduleService_GetSchedule_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardRuns_sync", "segments": [ { - "end": 51, + "end": 57, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 57, "start": 27, "type": "SHORT" }, @@ -37062,49 +41992,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 54, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 58, + "start": 55, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_get_schedule_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", - "shortName": "ScheduleServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.list_schedules", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.batch_create_tensorboard_time_series", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.ListSchedules", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "ListSchedules" + "shortName": "BatchCreateTensorboardTimeSeries" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListSchedulesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest" }, { "name": "parent", "type": "str" }, + { + "name": "requests", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -37118,22 +42052,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.schedule_service.pagers.ListSchedulesAsyncPager", - "shortName": "list_schedules" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse", + "shortName": "batch_create_tensorboard_time_series" }, - "description": "Sample for ListSchedules", - "file": "aiplatform_v1beta1_generated_schedule_service_list_schedules_async.py", + "description": "Sample for BatchCreateTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ScheduleService_ListSchedules_async", + "regionTag": 
"aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_async", "segments": [ { - "end": 52, + "end": 57, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 57, "start": 27, "type": "SHORT" }, @@ -37143,48 +42077,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 54, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 58, + "start": 55, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_list_schedules_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", - "shortName": "ScheduleServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.list_schedules", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.batch_create_tensorboard_time_series", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.ListSchedules", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "ListSchedules" + "shortName": "BatchCreateTensorboardTimeSeries" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListSchedulesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest" }, { "name": "parent", "type": "str" }, + { + "name": "requests", + "type": 
"MutableSequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -37198,22 +42136,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.schedule_service.pagers.ListSchedulesPager", - "shortName": "list_schedules" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse", + "shortName": "batch_create_tensorboard_time_series" }, - "description": "Sample for ListSchedules", - "file": "aiplatform_v1beta1_generated_schedule_service_list_schedules_sync.py", + "description": "Sample for BatchCreateTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ScheduleService_ListSchedules_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_sync", "segments": [ { - "end": 52, + "end": 57, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 57, "start": 27, "type": "SHORT" }, @@ -37223,47 +42161,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 54, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 58, + "start": 55, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_list_schedules_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", - "shortName": "ScheduleServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" 
}, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.pause_schedule", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.batch_read_tensorboard_time_series_data", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.PauseSchedule", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "PauseSchedule" + "shortName": "BatchReadTensorboardTimeSeriesData" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.PauseScheduleRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest" }, { - "name": "name", + "name": "tensorboard", "type": "str" }, { @@ -37279,21 +42217,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "pause_schedule" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse", + "shortName": "batch_read_tensorboard_time_series_data" }, - "description": "Sample for PauseSchedule", - "file": "aiplatform_v1beta1_generated_schedule_service_pause_schedule_async.py", + "description": "Sample for BatchReadTensorboardTimeSeriesData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ScheduleService_PauseSchedule_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_async", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -37303,44 +42242,46 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 
46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "start": 46, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_pause_schedule_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", - "shortName": "ScheduleServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.pause_schedule", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.batch_read_tensorboard_time_series_data", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.PauseSchedule", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "PauseSchedule" + "shortName": "BatchReadTensorboardTimeSeriesData" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.PauseScheduleRequest" + "type": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest" }, { - "name": "name", + "name": "tensorboard", "type": "str" }, { @@ -37356,21 +42297,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "pause_schedule" + "resultType": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse", + "shortName": "batch_read_tensorboard_time_series_data" }, - "description": "Sample for PauseSchedule", - "file": 
"aiplatform_v1beta1_generated_schedule_service_pause_schedule_sync.py", + "description": "Sample for BatchReadTensorboardTimeSeriesData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ScheduleService_PauseSchedule_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_sync", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -37380,50 +42322,56 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "start": 46, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_pause_schedule_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", - "shortName": "ScheduleServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.resume_schedule", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard_experiment", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.ResumeSchedule", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + 
"shortName": "TensorboardService" }, - "shortName": "ResumeSchedule" + "shortName": "CreateTensorboardExperiment" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ResumeScheduleRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { - "name": "catch_up", - "type": "bool" + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + }, + { + "name": "tensorboard_experiment_id", + "type": "str" }, { "name": "retry", @@ -37438,21 +42386,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "resume_schedule" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "create_tensorboard_experiment" }, - "description": "Sample for ResumeSchedule", - "file": "aiplatform_v1beta1_generated_schedule_service_resume_schedule_async.py", + "description": "Sample for CreateTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ScheduleService_ResumeSchedule_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardExperiment_async", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -37462,49 +42411,55 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "start": 46, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_resume_schedule_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_async.py" }, { "canonical": true, 
"clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", - "shortName": "ScheduleServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.resume_schedule", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard_experiment", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.ResumeSchedule", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "ResumeSchedule" + "shortName": "CreateTensorboardExperiment" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ResumeScheduleRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { - "name": "catch_up", - "type": "bool" + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + }, + { + "name": "tensorboard_experiment_id", + "type": "str" }, { "name": "retry", @@ -37519,21 +42474,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "shortName": "resume_schedule" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "create_tensorboard_experiment" }, - "description": "Sample for ResumeSchedule", - "file": "aiplatform_v1beta1_generated_schedule_service_resume_schedule_sync.py", + "description": "Sample for CreateTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - 
"regionTag": "aiplatform_v1beta1_generated_ScheduleService_ResumeSchedule_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardExperiment_sync", "segments": [ { - "end": 49, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 49, + "end": 52, "start": 27, "type": "SHORT" }, @@ -37543,50 +42499,56 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 46, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "start": 46, + "end": 49, + "start": 47, "type": "REQUEST_EXECUTION" }, { - "end": 50, + "end": 53, + "start": 50, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_resume_schedule_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient", - "shortName": "ScheduleServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceAsyncClient.update_schedule", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard_run", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.UpdateSchedule", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "UpdateSchedule" + "shortName": "CreateTensorboardRun" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateScheduleRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest" }, { - "name": "schedule", - "type": 
"google.cloud.aiplatform_v1beta1.types.Schedule" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + }, + { + "name": "tensorboard_run_id", + "type": "str" }, { "name": "retry", @@ -37601,22 +42563,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Schedule", - "shortName": "update_schedule" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "create_tensorboard_run" }, - "description": "Sample for UpdateSchedule", - "file": "aiplatform_v1beta1_generated_schedule_service_update_schedule_async.py", + "description": "Sample for CreateTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ScheduleService_UpdateSchedule_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardRun_async", "segments": [ { - "end": 57, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 56, "start": 27, "type": "SHORT" }, @@ -37626,51 +42588,55 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 52, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_update_schedule_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient", - "shortName": "ScheduleServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + 
"shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.ScheduleServiceClient.update_schedule", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard_run", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService.UpdateSchedule", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.ScheduleService", - "shortName": "ScheduleService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "UpdateSchedule" + "shortName": "CreateTensorboardRun" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateScheduleRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest" }, { - "name": "schedule", - "type": "google.cloud.aiplatform_v1beta1.types.Schedule" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + }, + { + "name": "tensorboard_run_id", + "type": "str" }, { "name": "retry", @@ -37685,22 +42651,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Schedule", - "shortName": "update_schedule" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "create_tensorboard_run" }, - "description": "Sample for UpdateSchedule", - "file": "aiplatform_v1beta1_generated_schedule_service_update_schedule_sync.py", + "description": "Sample for CreateTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_ScheduleService_UpdateSchedule_sync", + "regionTag": 
"aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardRun_sync", "segments": [ { - "end": 57, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 56, "start": 27, "type": "SHORT" }, @@ -37710,52 +42676,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 52, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_schedule_service_update_schedule_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", - "shortName": "SpecialistPoolServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.create_specialist_pool", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard_time_series", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", - "shortName": "SpecialistPoolService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "CreateSpecialistPool" + "shortName": "CreateTensorboardTimeSeries" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest" }, { "name": "parent", "type": 
"str" }, { - "name": "specialist_pool", - "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" }, { "name": "retry", @@ -37770,22 +42736,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_specialist_pool" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "create_tensorboard_time_series" }, - "description": "Sample for CreateSpecialistPool", - "file": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_async.py", + "description": "Sample for CreateTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_CreateSpecialistPool_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardTimeSeries_async", "segments": [ { - "end": 60, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 56, "start": 27, "type": "SHORT" }, @@ -37800,46 +42766,46 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 57, + "end": 53, "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", - "shortName": "SpecialistPoolServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.create_specialist_pool", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard_time_series", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", - "shortName": "SpecialistPoolService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "CreateSpecialistPool" + "shortName": "CreateTensorboardTimeSeries" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateSpecialistPoolRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest" }, { "name": "parent", "type": "str" }, { - "name": "specialist_pool", - "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" }, { "name": "retry", @@ -37854,22 +42820,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_specialist_pool" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "create_tensorboard_time_series" }, - "description": "Sample for CreateSpecialistPool", - "file": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_sync.py", + "description": "Sample for CreateTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_CreateSpecialistPool_sync", + "regionTag": 
"aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardTimeSeries_sync", "segments": [ { - "end": 60, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 60, + "end": 56, "start": 27, "type": "SHORT" }, @@ -37884,44 +42850,48 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 57, + "end": 53, "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 61, - "start": 58, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_specialist_pool_service_create_specialist_pool_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", - "shortName": "SpecialistPoolServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.delete_specialist_pool", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", - "shortName": "SpecialistPoolService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "DeleteSpecialistPool" + "shortName": "CreateTensorboard" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "tensorboard", + "type": 
"google.cloud.aiplatform_v1beta1.types.Tensorboard" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -37936,21 +42906,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_specialist_pool" + "shortName": "create_tensorboard" }, - "description": "Sample for DeleteSpecialistPool", - "file": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py", + "description": "Sample for CreateTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_async", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -37960,48 +42930,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", - "shortName": "SpecialistPoolServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.delete_specialist_pool", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", - "shortName": "SpecialistPoolService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "DeleteSpecialistPool" + "shortName": "CreateTensorboard" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteSpecialistPoolRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, + { + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -38016,21 +42990,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "delete_specialist_pool" + "shortName": "create_tensorboard" }, - "description": "Sample for DeleteSpecialistPool", - "file": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_sync.py", + "description": "Sample for CreateTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_DeleteSpecialistPool_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_sync", "segments": [ { - "end": 55, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 59, "start": 27, "type": "SHORT" }, @@ -38040,44 +43014,44 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 60, + "start": 57, "type": 
"RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_specialist_pool_service_delete_specialist_pool_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", - "shortName": "SpecialistPoolServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.get_specialist_pool", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard_experiment", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", - "shortName": "SpecialistPoolService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "GetSpecialistPool" + "shortName": "DeleteTensorboardExperiment" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest" }, { "name": "name", @@ -38096,22 +43070,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.SpecialistPool", - "shortName": "get_specialist_pool" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_experiment" }, - "description": "Sample for GetSpecialistPool", - "file": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_async.py", + "description": "Sample for DeleteTensorboardExperiment", + "file": 
"aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_GetSpecialistPool_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardExperiment_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -38126,38 +43100,38 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", - "shortName": "SpecialistPoolServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.get_specialist_pool", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard_experiment", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", - "shortName": "SpecialistPoolService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "GetSpecialistPool" + "shortName": "DeleteTensorboardExperiment" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.GetSpecialistPoolRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest" }, { "name": "name", @@ -38176,22 +43150,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.SpecialistPool", - "shortName": "get_specialist_pool" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_experiment" }, - "description": "Sample for GetSpecialistPool", - "file": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_sync.py", + "description": "Sample for DeleteTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_GetSpecialistPool_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardExperiment_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -38206,42 +43180,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_specialist_pool_service_get_specialist_pool_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", - "shortName": "SpecialistPoolServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.list_specialist_pools", + "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard_run", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", - "shortName": "SpecialistPoolService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "ListSpecialistPools" + "shortName": "DeleteTensorboardRun" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -38257,22 +43231,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsAsyncPager", - "shortName": "list_specialist_pools" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard_run" }, - "description": "Sample for ListSpecialistPools", - "file": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_async.py", + "description": "Sample for DeleteTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_ListSpecialistPools_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardRun_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -38287,41 +43261,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - 
"start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", - "shortName": "SpecialistPoolServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.list_specialist_pools", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard_run", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", - "shortName": "SpecialistPoolService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "ListSpecialistPools" + "shortName": "DeleteTensorboardRun" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListSpecialistPoolsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, { @@ -38337,22 +43311,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.specialist_pool_service.pagers.ListSpecialistPoolsPager", - "shortName": "list_specialist_pools" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard_run" }, - "description": "Sample for ListSpecialistPools", - "file": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_sync.py", + "description": 
"Sample for DeleteTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_ListSpecialistPools_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardRun_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -38367,47 +43341,43 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_specialist_pool_service_list_specialist_pools_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient", - "shortName": "SpecialistPoolServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", + "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceAsyncClient.update_specialist_pool", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard_time_series", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", - "shortName": "SpecialistPoolService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "UpdateSpecialistPool" + "shortName": "DeleteTensorboardTimeSeries" }, "parameters": [ { "name": 
"request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest" - }, - { - "name": "specialist_pool", - "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -38423,21 +43393,21 @@ } ], "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_specialist_pool" + "shortName": "delete_tensorboard_time_series" }, - "description": "Sample for UpdateSpecialistPool", - "file": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_async.py", + "description": "Sample for DeleteTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_UpdateSpecialistPool_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardTimeSeries_async", "segments": [ { - "end": 59, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 55, "start": 27, "type": "SHORT" }, @@ -38447,51 +43417,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient", - "shortName": "SpecialistPoolServiceClient" + "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceClient", + "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.SpecialistPoolServiceClient.update_specialist_pool", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard_time_series", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.SpecialistPoolService", - "shortName": "SpecialistPoolService" + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", + "shortName": "TensorboardService" }, - "shortName": "UpdateSpecialistPool" + "shortName": "DeleteTensorboardTimeSeries" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateSpecialistPoolRequest" - }, - { - "name": "specialist_pool", - "type": "google.cloud.aiplatform_v1beta1.types.SpecialistPool" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -38507,21 +43473,21 @@ } ], "resultType": "google.api_core.operation.Operation", - "shortName": "update_specialist_pool" + "shortName": "delete_tensorboard_time_series" }, - "description": "Sample for UpdateSpecialistPool", - "file": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_sync.py", + "description": "Sample for DeleteTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_SpecialistPoolService_UpdateSpecialistPool_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardTimeSeries_sync", 
"segments": [ { - "end": 59, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 55, "start": 27, "type": "SHORT" }, @@ -38531,22 +43497,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_specialist_pool_service_update_specialist_pool_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py" }, { "canonical": true, @@ -38556,28 +43522,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.batch_create_tensorboard_runs", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "BatchCreateTensorboardRuns" + "shortName": "DeleteTensorboard" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "requests", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -38591,22 +43553,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": 
"google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse", - "shortName": "batch_create_tensorboard_runs" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_tensorboard" }, - "description": "Sample for BatchCreateTensorboardRuns", - "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py", + "description": "Sample for DeleteTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardRuns_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboard_async", "segments": [ { - "end": 57, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 55, "start": 27, "type": "SHORT" }, @@ -38616,22 +43578,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 52, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_async.py" }, { "canonical": true, @@ -38640,28 +43602,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.batch_create_tensorboard_runs", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardRuns", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard", "service": { "fullName": 
"google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "BatchCreateTensorboardRuns" + "shortName": "DeleteTensorboard" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "requests", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -38675,22 +43633,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardRunsResponse", - "shortName": "batch_create_tensorboard_runs" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_tensorboard" }, - "description": "Sample for BatchCreateTensorboardRuns", - "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py", + "description": "Sample for DeleteTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardRuns_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboard_sync", "segments": [ { - "end": 57, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 55, "start": 27, "type": "SHORT" }, @@ -38700,22 +43658,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 52, + "end": 52, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_runs_sync.py" + 
"title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_sync.py" }, { "canonical": true, @@ -38725,28 +43683,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.batch_create_tensorboard_time_series", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.export_tensorboard_time_series_data", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "BatchCreateTensorboardTimeSeries" + "shortName": "ExportTensorboardTimeSeriesData" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest" }, { - "name": "parent", + "name": "tensorboard_time_series", "type": "str" }, - { - "name": "requests", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -38760,22 +43714,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse", - "shortName": "batch_create_tensorboard_time_series" + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager", + "shortName": "export_tensorboard_time_series_data" }, - "description": "Sample for BatchCreateTensorboardTimeSeries", - "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py", + 
"description": "Sample for ExportTensorboardTimeSeriesData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ExportTensorboardTimeSeriesData_async", "segments": [ { - "end": 57, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 52, "start": 27, "type": "SHORT" }, @@ -38785,22 +43739,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 52, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py" }, { "canonical": true, @@ -38809,28 +43763,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.batch_create_tensorboard_time_series", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.export_tensorboard_time_series_data", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchCreateTensorboardTimeSeries", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "BatchCreateTensorboardTimeSeries" + "shortName": "ExportTensorboardTimeSeriesData" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest" }, { - "name": "parent", + "name": "tensorboard_time_series", "type": "str" }, - { - "name": "requests", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest]" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -38844,22 +43794,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.BatchCreateTensorboardTimeSeriesResponse", - "shortName": "batch_create_tensorboard_time_series" + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager", + "shortName": "export_tensorboard_time_series_data" }, - "description": "Sample for BatchCreateTensorboardTimeSeries", - "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py", + "description": "Sample for ExportTensorboardTimeSeriesData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchCreateTensorboardTimeSeries_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ExportTensorboardTimeSeriesData_sync", "segments": [ { - "end": 57, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 52, "start": 27, "type": "SHORT" }, @@ -38869,22 +43819,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 52, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_create_tensorboard_time_series_sync.py" + 
"title": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py" }, { "canonical": true, @@ -38894,22 +43844,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.batch_read_tensorboard_time_series_data", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard_experiment", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "BatchReadTensorboardTimeSeriesData" + "shortName": "GetTensorboardExperiment" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest" }, { - "name": "tensorboard", + "name": "name", "type": "str" }, { @@ -38925,22 +43875,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse", - "shortName": "batch_read_tensorboard_time_series_data" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "get_tensorboard_experiment" }, - "description": "Sample for BatchReadTensorboardTimeSeriesData", - "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py", + "description": "Sample for GetTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardExperiment_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -38950,22 +43900,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_async.py" }, { "canonical": true, @@ -38974,22 +43924,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.batch_read_tensorboard_time_series_data", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard_experiment", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.BatchReadTensorboardTimeSeriesData", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "BatchReadTensorboardTimeSeriesData" + "shortName": "GetTensorboardExperiment" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest" }, { - "name": "tensorboard", + "name": "name", "type": "str" }, { @@ -39005,22 +43955,22 @@ "type": "Sequence[Tuple[str, str]" } ], - 
"resultType": "google.cloud.aiplatform_v1beta1.types.BatchReadTensorboardTimeSeriesDataResponse", - "shortName": "batch_read_tensorboard_time_series_data" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "get_tensorboard_experiment" }, - "description": "Sample for BatchReadTensorboardTimeSeriesData", - "file": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py", + "description": "Sample for GetTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_BatchReadTensorboardTimeSeriesData_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardExperiment_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -39030,22 +43980,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_batch_read_tensorboard_time_series_data_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_sync.py" }, { "canonical": true, @@ -39055,30 +44005,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard_experiment", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard_run", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "CreateTensorboardExperiment" + "shortName": "GetTensorboardRun" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "tensorboard_experiment", - "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest" }, { - "name": "tensorboard_experiment_id", + "name": "name", "type": "str" }, { @@ -39094,22 +44036,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", - "shortName": "create_tensorboard_experiment" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "get_tensorboard_run" }, - "description": "Sample for CreateTensorboardExperiment", - "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_async.py", + "description": "Sample for GetTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardExperiment_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardRun_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -39119,22 +44061,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 48, + "start": 46, "type": 
"REQUEST_EXECUTION" }, { - "end": 53, - "start": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_async.py" }, { "canonical": true, @@ -39143,30 +44085,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard_experiment", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard_run", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardExperiment", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "CreateTensorboardExperiment" + "shortName": "GetTensorboardRun" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardExperimentRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "tensorboard_experiment", - "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest" }, { - "name": "tensorboard_experiment_id", + "name": "name", "type": "str" }, { @@ -39182,22 +44116,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", - "shortName": "create_tensorboard_experiment" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "get_tensorboard_run" }, - "description": "Sample for CreateTensorboardExperiment", - "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_sync.py", + "description": "Sample for 
GetTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardExperiment_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardRun_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -39207,22 +44141,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 46, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 49, - "start": 47, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 50, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_experiment_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_sync.py" }, { "canonical": true, @@ -39232,30 +44166,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard_run", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard_time_series", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "CreateTensorboardRun" + "shortName": "GetTensorboardTimeSeries" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "tensorboard_run", - "type": 
"google.cloud.aiplatform_v1beta1.types.TensorboardRun" + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest" }, { - "name": "tensorboard_run_id", + "name": "name", "type": "str" }, { @@ -39271,22 +44197,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", - "shortName": "create_tensorboard_run" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "get_tensorboard_time_series" }, - "description": "Sample for CreateTensorboardRun", - "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_async.py", + "description": "Sample for GetTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardRun_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardTimeSeries_async", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -39296,22 +44222,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_async.py" }, { "canonical": true, @@ -39320,30 +44246,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard_run", + "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard_time_series", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardRun", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "CreateTensorboardRun" + "shortName": "GetTensorboardTimeSeries" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRunRequest" - }, - { - "name": "parent", - "type": "str" - }, - { - "name": "tensorboard_run", - "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest" }, { - "name": "tensorboard_run_id", + "name": "name", "type": "str" }, { @@ -39359,22 +44277,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", - "shortName": "create_tensorboard_run" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "get_tensorboard_time_series" }, - "description": "Sample for CreateTensorboardRun", - "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_sync.py", + "description": "Sample for GetTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardRun_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardTimeSeries_sync", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -39384,22 +44302,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": 
"REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_run_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_sync.py" }, { "canonical": true, @@ -39409,28 +44327,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard_time_series", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "CreateTensorboardTimeSeries" + "shortName": "GetTensorboard" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "tensorboard_time_series", - "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -39444,22 +44358,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", - "shortName": "create_tensorboard_time_series" + "resultType": "google.cloud.aiplatform_v1beta1.types.Tensorboard", + "shortName": "get_tensorboard" }, - "description": "Sample for CreateTensorboardTimeSeries", - "file": 
"aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_async.py", + "description": "Sample for GetTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardTimeSeries_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboard_async", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -39469,22 +44383,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_async.py" }, { "canonical": true, @@ -39493,28 +44407,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard_time_series", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboardTimeSeries", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "CreateTensorboardTimeSeries" + "shortName": "GetTensorboard" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardTimeSeriesRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest" }, { - "name": "parent", + "name": "name", "type": "str" }, - { - "name": "tensorboard_time_series", - "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -39528,22 +44438,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", - "shortName": "create_tensorboard_time_series" + "resultType": "google.cloud.aiplatform_v1beta1.types.Tensorboard", + "shortName": "get_tensorboard" }, - "description": "Sample for CreateTensorboardTimeSeries", - "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_sync.py", + "description": "Sample for GetTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboardTimeSeries_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboard_sync", "segments": [ { - "end": 56, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 51, "start": 27, "type": "SHORT" }, @@ -39553,22 +44463,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_time_series_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_sync.py" }, { "canonical": true, @@ -39578,28 +44488,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.create_tensorboard", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboard_experiments", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "CreateTensorboard" + "shortName": "ListTensorboardExperiments" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "tensorboard", - "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -39613,22 +44519,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "create_tensorboard" + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager", + "shortName": "list_tensorboard_experiments" }, - "description": "Sample for CreateTensorboard", - "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_async.py", + "description": "Sample for ListTensorboardExperiments", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardExperiments_async", "segments": [ { - "end": 59, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 52, "start": 27, "type": "SHORT" }, @@ -39638,22 
+44544,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_async.py" }, { "canonical": true, @@ -39662,28 +44568,24 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.create_tensorboard", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboard_experiments", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.CreateTensorboard", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "CreateTensorboard" + "shortName": "ListTensorboardExperiments" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.CreateTensorboardRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest" }, { "name": "parent", "type": "str" }, - { - "name": "tensorboard", - "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" - }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -39697,22 +44599,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "create_tensorboard" + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager", + "shortName": "list_tensorboard_experiments" }, - "description": "Sample for 
CreateTensorboard", - "file": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_sync.py", + "description": "Sample for ListTensorboardExperiments", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_CreateTensorboard_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardExperiments_sync", "segments": [ { - "end": 59, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 59, + "end": 52, "start": 27, "type": "SHORT" }, @@ -39722,22 +44624,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 56, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 60, - "start": 57, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_create_tensorboard_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_sync.py" }, { "canonical": true, @@ -39747,22 +44649,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard_experiment", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboard_runs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "DeleteTensorboardExperiment" + "shortName": "ListTensorboardRuns" }, "parameters": [ { "name": "request", - "type": 
"google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -39778,22 +44680,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_tensorboard_experiment" + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager", + "shortName": "list_tensorboard_runs" }, - "description": "Sample for DeleteTensorboardExperiment", - "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_async.py", + "description": "Sample for ListTensorboardRuns", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardExperiment_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardRuns_async", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -39808,17 +44710,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_async.py" }, { "canonical": true, @@ -39827,22 +44729,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard_experiment", + "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboard_runs", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardExperiment", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "DeleteTensorboardExperiment" + "shortName": "ListTensorboardRuns" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardExperimentRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -39858,22 +44760,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_tensorboard_experiment" + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsPager", + "shortName": "list_tensorboard_runs" }, - "description": "Sample for DeleteTensorboardExperiment", - "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py", + "description": "Sample for ListTensorboardRuns", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardExperiment_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardRuns_sync", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -39888,17 +44790,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_experiment_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_sync.py" }, { "canonical": true, @@ -39908,22 +44810,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard_run", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboard_time_series", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "DeleteTensorboardRun" + "shortName": "ListTensorboardTimeSeries" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -39939,22 +44841,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_tensorboard_run" + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager", + "shortName": "list_tensorboard_time_series" }, - "description": "Sample for DeleteTensorboardRun", - "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_async.py", + "description": "Sample for ListTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": 
"aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardRun_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardTimeSeries_async", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -39969,17 +44871,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_async.py" }, { "canonical": true, @@ -39988,22 +44890,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard_run", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboard_time_series", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardRun", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "DeleteTensorboardRun" + "shortName": "ListTensorboardTimeSeries" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRunRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -40019,22 +44921,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_tensorboard_run" + "resultType": 
"google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager", + "shortName": "list_tensorboard_time_series" }, - "description": "Sample for DeleteTensorboardRun", - "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_sync.py", + "description": "Sample for ListTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardRun_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardTimeSeries_sync", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -40049,17 +44951,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_run_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_sync.py" }, { "canonical": true, @@ -40069,22 +44971,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard_time_series", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboards", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "DeleteTensorboardTimeSeries" + "shortName": 
"ListTensorboards" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -40100,22 +45002,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_tensorboard_time_series" + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager", + "shortName": "list_tensorboards" }, - "description": "Sample for DeleteTensorboardTimeSeries", - "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_async.py", + "description": "Sample for ListTensorboards", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardTimeSeries_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboards_async", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -40130,17 +45032,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_async.py" }, { "canonical": true, @@ -40149,22 +45051,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard_time_series", + "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboards", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboardTimeSeries", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "DeleteTensorboardTimeSeries" + "shortName": "ListTensorboards" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardTimeSeriesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest" }, { - "name": "name", + "name": "parent", "type": "str" }, { @@ -40180,22 +45082,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_tensorboard_time_series" + "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsPager", + "shortName": "list_tensorboards" }, - "description": "Sample for DeleteTensorboardTimeSeries", - "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py", + "description": "Sample for ListTensorboards", + "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboardTimeSeries_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboards_sync", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -40210,17 +45112,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_time_series_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_sync.py" }, { "canonical": true, @@ -40230,22 +45132,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.delete_tensorboard", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_blob_data", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "DeleteTensorboard" + "shortName": "ReadTensorboardBlobData" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest" }, { - "name": "name", + "name": "time_series", "type": "str" }, { @@ -40261,22 +45163,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "delete_tensorboard" + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]", + "shortName": "read_tensorboard_blob_data" }, - "description": "Sample for DeleteTensorboard", - "file": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_async.py", + "description": "Sample for ReadTensorboardBlobData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboard_async", + "regionTag": 
"aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_async", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -40291,17 +45193,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_async.py" }, { "canonical": true, @@ -40310,22 +45212,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.delete_tensorboard", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_blob_data", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.DeleteTensorboard", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "DeleteTensorboard" + "shortName": "ReadTensorboardBlobData" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.DeleteTensorboardRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest" }, { - "name": "name", + "name": "time_series", "type": "str" }, { @@ -40341,22 +45243,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "delete_tensorboard" + "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]", + "shortName": "read_tensorboard_blob_data" }, - "description": "Sample for DeleteTensorboard", - "file": 
"aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_sync.py", + "description": "Sample for ReadTensorboardBlobData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_DeleteTensorboard_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_sync", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -40371,17 +45273,17 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 52, + "end": 48, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_delete_tensorboard_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py" }, { "canonical": true, @@ -40391,22 +45293,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.export_tensorboard_time_series_data", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_size", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardSize", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "ExportTensorboardTimeSeriesData" + "shortName": "ReadTensorboardSize" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.ReadTensorboardSizeRequest" }, { - "name": "tensorboard_time_series", + "name": "tensorboard", "type": "str" }, { @@ -40422,22 +45324,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataAsyncPager", - "shortName": "export_tensorboard_time_series_data" + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardSizeResponse", + "shortName": "read_tensorboard_size" }, - "description": "Sample for ExportTensorboardTimeSeriesData", - "file": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py", + "description": "Sample for ReadTensorboardSize", + "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_size_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ExportTensorboardTimeSeriesData_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardSize_async", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -40457,12 +45359,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_size_async.py" }, { "canonical": true, @@ -40471,22 +45373,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.export_tensorboard_time_series_data", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_size", "method": { - "fullName": 
"google.cloud.aiplatform.v1beta1.TensorboardService.ExportTensorboardTimeSeriesData", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardSize", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "ExportTensorboardTimeSeriesData" + "shortName": "ReadTensorboardSize" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ExportTensorboardTimeSeriesDataRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardSizeRequest" }, { - "name": "tensorboard_time_series", + "name": "tensorboard", "type": "str" }, { @@ -40502,22 +45404,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ExportTensorboardTimeSeriesDataPager", - "shortName": "export_tensorboard_time_series_data" + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardSizeResponse", + "shortName": "read_tensorboard_size" }, - "description": "Sample for ExportTensorboardTimeSeriesData", - "file": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py", + "description": "Sample for ReadTensorboardSize", + "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_size_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ExportTensorboardTimeSeriesData_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardSize_sync", "segments": [ { - "end": 52, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 51, "start": 27, "type": "SHORT" }, @@ -40537,12 +45439,12 @@ "type": "REQUEST_EXECUTION" }, { - "end": 53, + "end": 52, "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_export_tensorboard_time_series_data_sync.py" + "title": 
"aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_size_sync.py" }, { "canonical": true, @@ -40552,22 +45454,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard_experiment", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_time_series_data", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "GetTensorboardExperiment" + "shortName": "ReadTensorboardTimeSeriesData" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest" }, { - "name": "name", + "name": "tensorboard_time_series", "type": "str" }, { @@ -40583,14 +45485,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", - "shortName": "get_tensorboard_experiment" + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse", + "shortName": "read_tensorboard_time_series_data" }, - "description": "Sample for GetTensorboardExperiment", - "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_async.py", + "description": "Sample for ReadTensorboardTimeSeriesData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardExperiment_async", + "regionTag": 
"aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardTimeSeriesData_async", "segments": [ { "end": 51, @@ -40623,7 +45525,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py" }, { "canonical": true, @@ -40632,22 +45534,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard_experiment", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_time_series_data", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardExperiment", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "GetTensorboardExperiment" + "shortName": "ReadTensorboardTimeSeriesData" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardExperimentRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest" }, { - "name": "name", + "name": "tensorboard_time_series", "type": "str" }, { @@ -40663,14 +45565,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", - "shortName": "get_tensorboard_experiment" + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse", + "shortName": "read_tensorboard_time_series_data" }, - "description": "Sample for GetTensorboardExperiment", - "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_sync.py", + "description": "Sample for ReadTensorboardTimeSeriesData", + 
"file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardExperiment_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardTimeSeriesData_sync", "segments": [ { "end": 51, @@ -40703,7 +45605,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_experiment_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py" }, { "canonical": true, @@ -40713,22 +45615,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard_run", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_usage", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardUsage", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "GetTensorboardRun" + "shortName": "ReadTensorboardUsage" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardUsageRequest" }, { - "name": "name", + "name": "tensorboard", "type": "str" }, { @@ -40744,14 +45646,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", - "shortName": "get_tensorboard_run" + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardUsageResponse", + "shortName": "read_tensorboard_usage" }, - "description": "Sample for GetTensorboardRun", 
- "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_async.py", + "description": "Sample for ReadTensorboardUsage", + "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_usage_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardRun_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardUsage_async", "segments": [ { "end": 51, @@ -40784,7 +45686,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_usage_async.py" }, { "canonical": true, @@ -40793,22 +45695,22 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard_run", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_usage", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardRun", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardUsage", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "GetTensorboardRun" + "shortName": "ReadTensorboardUsage" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRunRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardUsageRequest" }, { - "name": "name", + "name": "tensorboard", "type": "str" }, { @@ -40824,14 +45726,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", - "shortName": "get_tensorboard_run" + "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardUsageResponse", + 
"shortName": "read_tensorboard_usage" }, - "description": "Sample for GetTensorboardRun", - "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_sync.py", + "description": "Sample for ReadTensorboardUsage", + "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_usage_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardRun_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardUsage_sync", "segments": [ { "end": 51, @@ -40864,7 +45766,7 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_run_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_usage_sync.py" }, { "canonical": true, @@ -40874,23 +45776,27 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard_time_series", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard_experiment", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "GetTensorboardTimeSeries" + "shortName": "UpdateTensorboardExperiment" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest" }, { - "name": "name", - "type": "str" + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + }, + { + "name": 
"update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -40905,22 +45811,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", - "shortName": "get_tensorboard_time_series" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "update_tensorboard_experiment" }, - "description": "Sample for GetTensorboardTimeSeries", - "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_async.py", + "description": "Sample for UpdateTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardTimeSeries_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardExperiment_async", "segments": [ { - "end": 51, + "end": 50, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 50, "start": 27, "type": "SHORT" }, @@ -40930,22 +45836,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 44, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 47, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 51, + "start": 48, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_async.py" }, { "canonical": true, @@ -40954,23 +45860,27 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard_time_series", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard_experiment", 
"method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboardTimeSeries", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "GetTensorboardTimeSeries" + "shortName": "UpdateTensorboardExperiment" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardTimeSeriesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest" }, { - "name": "name", - "type": "str" + "name": "tensorboard_experiment", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -40985,22 +45895,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", - "shortName": "get_tensorboard_time_series" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", + "shortName": "update_tensorboard_experiment" }, - "description": "Sample for GetTensorboardTimeSeries", - "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_sync.py", + "description": "Sample for UpdateTensorboardExperiment", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboardTimeSeries_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardExperiment_sync", "segments": [ { - "end": 51, + "end": 50, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 50, "start": 27, "type": "SHORT" }, @@ -41010,22 +45920,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 44, "start": 41, "type": 
"REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 47, + "start": 45, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 51, + "start": 48, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_time_series_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_sync.py" }, { "canonical": true, @@ -41035,23 +45945,27 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.get_tensorboard", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard_run", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "GetTensorboard" + "shortName": "UpdateTensorboardRun" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest" }, { - "name": "name", - "type": "str" + "name": "tensorboard_run", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -41066,22 +45980,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Tensorboard", - "shortName": "get_tensorboard" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "update_tensorboard_run" }, - "description": "Sample for GetTensorboard", - "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_async.py", + 
"description": "Sample for UpdateTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboard_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardRun_async", "segments": [ { - "end": 51, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 54, "start": 27, "type": "SHORT" }, @@ -41091,22 +46005,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 51, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_async.py" }, { "canonical": true, @@ -41115,23 +46029,27 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.get_tensorboard", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard_run", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.GetTensorboard", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "GetTensorboard" + "shortName": "UpdateTensorboardRun" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.GetTensorboardRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest" }, { - "name": "name", - "type": "str" + "name": "tensorboard_run", + "type": 
"google.cloud.aiplatform_v1beta1.types.TensorboardRun" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -41146,22 +46064,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.Tensorboard", - "shortName": "get_tensorboard" + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", + "shortName": "update_tensorboard_run" }, - "description": "Sample for GetTensorboard", - "file": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_sync.py", + "description": "Sample for UpdateTensorboardRun", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_GetTensorboard_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardRun_sync", "segments": [ { - "end": 51, + "end": 54, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 54, "start": 27, "type": "SHORT" }, @@ -41171,22 +46089,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 51, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 55, + "start": 52, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_get_tensorboard_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_sync.py" }, { "canonical": true, @@ -41196,23 +46114,27 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboard_experiments", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard_time_series", "method": { - 
"fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "ListTensorboardExperiments" + "shortName": "UpdateTensorboardTimeSeries" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest" }, { - "name": "parent", - "type": "str" + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -41227,22 +46149,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsAsyncPager", - "shortName": "list_tensorboard_experiments" - }, - "description": "Sample for ListTensorboardExperiments", - "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_async.py", + "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "update_tensorboard_time_series" + }, + "description": "Sample for UpdateTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardExperiments_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardTimeSeries_async", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -41252,22 +46174,22 @@ "type": "CLIENT_INITIALIZATION" }, { 
- "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 52, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_async.py" }, { "canonical": true, @@ -41276,23 +46198,27 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboard_experiments", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard_time_series", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardExperiments", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "ListTensorboardExperiments" + "shortName": "UpdateTensorboardTimeSeries" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardExperimentsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest" }, { - "name": "parent", - "type": "str" + "name": "tensorboard_time_series", + "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -41307,22 +46233,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardExperimentsPager", - "shortName": "list_tensorboard_experiments" + "resultType": 
"google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", + "shortName": "update_tensorboard_time_series" }, - "description": "Sample for ListTensorboardExperiments", - "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_sync.py", + "description": "Sample for UpdateTensorboardTimeSeries", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardExperiments_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardTimeSeries_sync", "segments": [ { - "end": 52, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 55, "start": 27, "type": "SHORT" }, @@ -41332,22 +46258,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 52, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_experiments_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_sync.py" }, { "canonical": true, @@ -41357,23 +46283,27 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboard_runs", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": 
"TensorboardService" }, - "shortName": "ListTensorboardRuns" + "shortName": "UpdateTensorboard" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest" }, { - "name": "parent", - "type": "str" + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -41388,22 +46318,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsAsyncPager", - "shortName": "list_tensorboard_runs" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "update_tensorboard" }, - "description": "Sample for ListTensorboardRuns", - "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_async.py", + "description": "Sample for UpdateTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardRuns_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboard_async", "segments": [ { - "end": 52, + "end": 58, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 58, "start": 27, "type": "SHORT" }, @@ -41413,22 +46343,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 55, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 59, + "start": 56, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_async.py" + "title": 
"aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_async.py" }, { "canonical": true, @@ -41437,23 +46367,27 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboard_runs", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardRuns", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "ListTensorboardRuns" + "shortName": "UpdateTensorboard" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardRunsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest" }, { - "name": "parent", - "type": "str" + "name": "tensorboard", + "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" + }, + { + "name": "update_mask", + "type": "google.protobuf.field_mask_pb2.FieldMask" }, { "name": "retry", @@ -41468,22 +46402,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardRunsPager", - "shortName": "list_tensorboard_runs" + "resultType": "google.api_core.operation.Operation", + "shortName": "update_tensorboard" }, - "description": "Sample for ListTensorboardRuns", - "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_sync.py", + "description": "Sample for UpdateTensorboard", + "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardRuns_sync", + "regionTag": 
"aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboard_sync", "segments": [ { - "end": 52, + "end": 58, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 58, "start": 27, "type": "SHORT" }, @@ -41493,22 +46427,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 48, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 55, + "start": 49, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 59, + "start": 56, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_runs_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_sync.py" }, { "canonical": true, @@ -41518,24 +46452,28 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboard_time_series", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.write_tensorboard_experiment_data", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "ListTensorboardTimeSeries" + "shortName": "WriteTensorboardExperimentData" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest" }, { - "name": "parent", + "name": "tensorboard_experiment", "type": "str" }, + { + "name": "write_run_data_requests", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]" + }, { "name": "retry", "type": 
"google.api_core.retry.Retry" @@ -41549,22 +46487,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesAsyncPager", - "shortName": "list_tensorboard_time_series" + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse", + "shortName": "write_tensorboard_experiment_data" }, - "description": "Sample for ListTensorboardTimeSeries", - "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_async.py", + "description": "Sample for WriteTensorboardExperimentData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardTimeSeries_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardExperimentData_async", "segments": [ { - "end": 52, + "end": 57, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 57, "start": 27, "type": "SHORT" }, @@ -41574,22 +46512,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 54, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 58, + "start": 55, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py" }, { "canonical": true, @@ -41598,24 +46536,28 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboard_time_series", + "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceClient.write_tensorboard_experiment_data", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboardTimeSeries", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "ListTensorboardTimeSeries" + "shortName": "WriteTensorboardExperimentData" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardTimeSeriesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest" }, { - "name": "parent", + "name": "tensorboard_experiment", "type": "str" }, + { + "name": "write_run_data_requests", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -41629,22 +46571,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardTimeSeriesPager", - "shortName": "list_tensorboard_time_series" + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse", + "shortName": "write_tensorboard_experiment_data" }, - "description": "Sample for ListTensorboardTimeSeries", - "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_sync.py", + "description": "Sample for WriteTensorboardExperimentData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboardTimeSeries_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardExperimentData_sync", "segments": [ { - "end": 52, + "end": 57, "start": 
27, "type": "FULL" }, { - "end": 52, + "end": 57, "start": 27, "type": "SHORT" }, @@ -41654,22 +46596,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 51, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 54, + "start": 52, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 58, + "start": 55, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboard_time_series_sync.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py" }, { "canonical": true, @@ -41679,24 +46621,28 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", "shortName": "TensorboardServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.list_tensorboards", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.write_tensorboard_run_data", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": "ListTensorboards" + "shortName": "WriteTensorboardRunData" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest" }, { - "name": "parent", + "name": "tensorboard_run", "type": "str" }, + { + "name": "time_series_data", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -41710,22 +46656,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsAsyncPager", - "shortName": 
"list_tensorboards" + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse", + "shortName": "write_tensorboard_run_data" }, - "description": "Sample for ListTensorboards", - "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_async.py", + "description": "Sample for WriteTensorboardRunData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboards_async", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardRunData_async", "segments": [ { - "end": 52, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 56, "start": 27, "type": "SHORT" }, @@ -41735,22 +46681,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_async.py" + "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_async.py" }, { "canonical": true, @@ -41759,24 +46705,28 @@ "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", "shortName": "TensorboardServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.list_tensorboards", + "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.write_tensorboard_run_data", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ListTensorboards", + "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData", "service": { "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", "shortName": "TensorboardService" }, - "shortName": 
"ListTensorboards" + "shortName": "WriteTensorboardRunData" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ListTensorboardsRequest" + "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest" }, { - "name": "parent", + "name": "tensorboard_run", "type": "str" }, + { + "name": "time_series_data", + "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -41790,22 +46740,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.services.tensorboard_service.pagers.ListTensorboardsPager", - "shortName": "list_tensorboards" + "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse", + "shortName": "write_tensorboard_run_data" }, - "description": "Sample for ListTensorboards", - "file": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_sync.py", + "description": "Sample for WriteTensorboardRunData", + "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ListTensorboards_sync", + "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardRunData_sync", "segments": [ { - "end": 52, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 56, "start": 27, "type": "SHORT" }, @@ -41815,49 +46765,53 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_list_tensorboards_sync.py" + "title": 
"aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", - "shortName": "TensorboardServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient", + "shortName": "VertexRagDataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_blob_data", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient.create_rag_corpus", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.CreateRagCorpus", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "ReadTensorboardBlobData" + "shortName": "CreateRagCorpus" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateRagCorpusRequest" }, { - "name": "time_series", + "name": "parent", "type": "str" }, + { + "name": "rag_corpus", + "type": "google.cloud.aiplatform_v1beta1.types.RagCorpus" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -41871,22 +46825,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]", - "shortName": "read_tensorboard_blob_data" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_rag_corpus" }, - "description": "Sample for ReadTensorboardBlobData", - "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_async.py", + 
"description": "Sample for CreateRagCorpus", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_create_rag_corpus_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_async", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_CreateRagCorpus_async", "segments": [ { - "end": 52, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 59, "start": 27, "type": "SHORT" }, @@ -41896,48 +46850,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_async.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_create_rag_corpus_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", - "shortName": "TensorboardServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient", + "shortName": "VertexRagDataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_blob_data", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient.create_rag_corpus", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardBlobData", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.CreateRagCorpus", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "ReadTensorboardBlobData" + "shortName": "CreateRagCorpus" }, 
"parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataRequest" + "type": "google.cloud.aiplatform_v1beta1.types.CreateRagCorpusRequest" }, { - "name": "time_series", + "name": "parent", "type": "str" }, + { + "name": "rag_corpus", + "type": "google.cloud.aiplatform_v1beta1.types.RagCorpus" + }, { "name": "retry", "type": "google.api_core.retry.Retry" @@ -41951,22 +46909,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "Iterable[google.cloud.aiplatform_v1beta1.types.ReadTensorboardBlobDataResponse]", - "shortName": "read_tensorboard_blob_data" + "resultType": "google.api_core.operation.Operation", + "shortName": "create_rag_corpus" }, - "description": "Sample for ReadTensorboardBlobData", - "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py", + "description": "Sample for CreateRagCorpus", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_create_rag_corpus_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardBlobData_sync", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_CreateRagCorpus_sync", "segments": [ { - "end": 52, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 52, + "end": 59, "start": 27, "type": "SHORT" }, @@ -41976,47 +46934,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 45, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 48, - "start": 46, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 53, - "start": 49, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_blob_data_sync.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_create_rag_corpus_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": 
"google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", - "shortName": "TensorboardServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient", + "shortName": "VertexRagDataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_size", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient.delete_rag_corpus", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardSize", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.DeleteRagCorpus", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "ReadTensorboardSize" + "shortName": "DeleteRagCorpus" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardSizeRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteRagCorpusRequest" }, { - "name": "tensorboard", + "name": "name", "type": "str" }, { @@ -42032,22 +46990,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardSizeResponse", - "shortName": "read_tensorboard_size" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_rag_corpus" }, - "description": "Sample for ReadTensorboardSize", - "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_size_async.py", + "description": "Sample for DeleteRagCorpus", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_corpus_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardSize_async", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_DeleteRagCorpus_async", 
"segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -42062,41 +47020,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_size_async.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_corpus_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", - "shortName": "TensorboardServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient", + "shortName": "VertexRagDataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_size", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient.delete_rag_corpus", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardSize", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.DeleteRagCorpus", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "ReadTensorboardSize" + "shortName": "DeleteRagCorpus" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardSizeRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteRagCorpusRequest" }, { - "name": "tensorboard", + "name": "name", "type": "str" }, { @@ -42112,22 +47070,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardSizeResponse", - "shortName": "read_tensorboard_size" + "resultType": 
"google.api_core.operation.Operation", + "shortName": "delete_rag_corpus" }, - "description": "Sample for ReadTensorboardSize", - "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_size_sync.py", + "description": "Sample for DeleteRagCorpus", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_corpus_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardSize_sync", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_DeleteRagCorpus_sync", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -42142,42 +47100,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_size_sync.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_corpus_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", - "shortName": "TensorboardServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient", + "shortName": "VertexRagDataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_time_series_data", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient.delete_rag_file", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.DeleteRagFile", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": 
"google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "ReadTensorboardTimeSeriesData" + "shortName": "DeleteRagFile" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteRagFileRequest" }, { - "name": "tensorboard_time_series", + "name": "name", "type": "str" }, { @@ -42193,22 +47151,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse", - "shortName": "read_tensorboard_time_series_data" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "delete_rag_file" }, - "description": "Sample for ReadTensorboardTimeSeriesData", - "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py", + "description": "Sample for DeleteRagFile", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_file_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardTimeSeriesData_async", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_DeleteRagFile_async", "segments": [ { - "end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -42223,41 +47181,41 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_async.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_file_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", - 
"shortName": "TensorboardServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient", + "shortName": "VertexRagDataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_time_series_data", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient.delete_rag_file", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardTimeSeriesData", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.DeleteRagFile", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "ReadTensorboardTimeSeriesData" + "shortName": "DeleteRagFile" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataRequest" + "type": "google.cloud.aiplatform_v1beta1.types.DeleteRagFileRequest" }, { - "name": "tensorboard_time_series", + "name": "name", "type": "str" }, { @@ -42273,22 +47231,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardTimeSeriesDataResponse", - "shortName": "read_tensorboard_time_series_data" + "resultType": "google.api_core.operation.Operation", + "shortName": "delete_rag_file" }, - "description": "Sample for ReadTensorboardTimeSeriesData", - "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py", + "description": "Sample for DeleteRagFile", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_file_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardTimeSeriesData_sync", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_DeleteRagFile_sync", "segments": [ { - 
"end": 51, + "end": 55, "start": 27, "type": "FULL" }, { - "end": 51, + "end": 55, "start": 27, "type": "SHORT" }, @@ -42303,42 +47261,42 @@ "type": "REQUEST_INITIALIZATION" }, { - "end": 48, + "end": 52, "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 52, - "start": 49, + "end": 56, + "start": 53, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_time_series_data_sync.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_delete_rag_file_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", - "shortName": "TensorboardServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient", + "shortName": "VertexRagDataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.read_tensorboard_usage", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient.get_rag_corpus", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardUsage", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.GetRagCorpus", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "ReadTensorboardUsage" + "shortName": "GetRagCorpus" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardUsageRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetRagCorpusRequest" }, { - "name": "tensorboard", + "name": "name", "type": "str" }, { @@ -42354,14 +47312,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardUsageResponse", - "shortName": "read_tensorboard_usage" + 
"resultType": "google.cloud.aiplatform_v1beta1.types.RagCorpus", + "shortName": "get_rag_corpus" }, - "description": "Sample for ReadTensorboardUsage", - "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_usage_async.py", + "description": "Sample for GetRagCorpus", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_corpus_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardUsage_async", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_GetRagCorpus_async", "segments": [ { "end": 51, @@ -42394,31 +47352,31 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_usage_async.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_corpus_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", - "shortName": "TensorboardServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient", + "shortName": "VertexRagDataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.read_tensorboard_usage", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient.get_rag_corpus", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.ReadTensorboardUsage", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.GetRagCorpus", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "ReadTensorboardUsage" + "shortName": "GetRagCorpus" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardUsageRequest" + "type": 
"google.cloud.aiplatform_v1beta1.types.GetRagCorpusRequest" }, { - "name": "tensorboard", + "name": "name", "type": "str" }, { @@ -42434,14 +47392,14 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.ReadTensorboardUsageResponse", - "shortName": "read_tensorboard_usage" + "resultType": "google.cloud.aiplatform_v1beta1.types.RagCorpus", + "shortName": "get_rag_corpus" }, - "description": "Sample for ReadTensorboardUsage", - "file": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_usage_sync.py", + "description": "Sample for GetRagCorpus", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_corpus_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_ReadTensorboardUsage_sync", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_GetRagCorpus_sync", "segments": [ { "end": 51, @@ -42474,37 +47432,33 @@ "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_read_tensorboard_usage_sync.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_corpus_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", - "shortName": "TensorboardServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient", + "shortName": "VertexRagDataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard_experiment", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient.get_rag_file", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.GetRagFile", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - 
"shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "UpdateTensorboardExperiment" + "shortName": "GetRagFile" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetRagFileRequest" }, { - "name": "tensorboard_experiment", - "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -42519,22 +47473,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", - "shortName": "update_tensorboard_experiment" + "resultType": "google.cloud.aiplatform_v1beta1.types.RagFile", + "shortName": "get_rag_file" }, - "description": "Sample for UpdateTensorboardExperiment", - "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_async.py", + "description": "Sample for GetRagFile", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_file_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardExperiment_async", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_GetRagFile_async", "segments": [ { - "end": 50, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 51, "start": 27, "type": "SHORT" }, @@ -42544,51 +47498,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_async.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_file_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", - "shortName": "TensorboardServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient", + "shortName": "VertexRagDataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard_experiment", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient.get_rag_file", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardExperiment", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.GetRagFile", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "UpdateTensorboardExperiment" + "shortName": "GetRagFile" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardExperimentRequest" + "type": "google.cloud.aiplatform_v1beta1.types.GetRagFileRequest" }, { - "name": "tensorboard_experiment", - "type": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "name", + "type": "str" }, { "name": "retry", @@ -42603,22 +47553,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardExperiment", - "shortName": "update_tensorboard_experiment" + "resultType": "google.cloud.aiplatform_v1beta1.types.RagFile", + "shortName": "get_rag_file" }, - "description": "Sample for UpdateTensorboardExperiment", - "file": 
"aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_sync.py", + "description": "Sample for GetRagFile", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_file_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardExperiment_sync", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_GetRagFile_sync", "segments": [ { - "end": 50, + "end": 51, "start": 27, "type": "FULL" }, { - "end": 50, + "end": 51, "start": 27, "type": "SHORT" }, @@ -42628,52 +47578,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 44, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 47, - "start": 45, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 51, - "start": 48, + "end": 52, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_experiment_sync.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_get_rag_file_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", - "shortName": "TensorboardServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient", + "shortName": "VertexRagDataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard_run", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient.import_rag_files", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + 
"shortName": "VertexRagDataService" }, - "shortName": "UpdateTensorboardRun" + "shortName": "ImportRagFiles" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ImportRagFilesRequest" }, { - "name": "tensorboard_run", - "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "import_rag_files_config", + "type": "google.cloud.aiplatform_v1beta1.types.ImportRagFilesConfig" }, { "name": "retry", @@ -42688,22 +47638,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", - "shortName": "update_tensorboard_run" + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "import_rag_files" }, - "description": "Sample for UpdateTensorboardRun", - "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_async.py", + "description": "Sample for ImportRagFiles", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_import_rag_files_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardRun_async", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_ImportRagFiles_async", "segments": [ { - "end": 54, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 59, "start": 27, "type": "SHORT" }, @@ -42713,51 +47663,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 49, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_async.py" + "title": 
"aiplatform_v1beta1_generated_vertex_rag_data_service_import_rag_files_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", - "shortName": "TensorboardServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient", + "shortName": "VertexRagDataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard_run", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient.import_rag_files", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardRun", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.ImportRagFiles", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "UpdateTensorboardRun" + "shortName": "ImportRagFiles" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRunRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ImportRagFilesRequest" }, { - "name": "tensorboard_run", - "type": "google.cloud.aiplatform_v1beta1.types.TensorboardRun" + "name": "parent", + "type": "str" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "import_rag_files_config", + "type": "google.cloud.aiplatform_v1beta1.types.ImportRagFilesConfig" }, { "name": "retry", @@ -42772,22 +47722,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardRun", - "shortName": "update_tensorboard_run" + "resultType": "google.api_core.operation.Operation", + "shortName": "import_rag_files" }, - "description": "Sample for UpdateTensorboardRun", - "file": 
"aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_sync.py", + "description": "Sample for ImportRagFiles", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_import_rag_files_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardRun_sync", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_ImportRagFiles_sync", "segments": [ { - "end": 54, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 54, + "end": 59, "start": 27, "type": "SHORT" }, @@ -42797,52 +47747,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 49, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 51, - "start": 49, + "end": 56, + "start": 50, "type": "REQUEST_EXECUTION" }, { - "end": 55, - "start": 52, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_run_sync.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_import_rag_files_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", - "shortName": "TensorboardServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient", + "shortName": "VertexRagDataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard_time_series", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient.list_rag_corpora", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagCorpora", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": 
"google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "UpdateTensorboardTimeSeries" + "shortName": "ListRagCorpora" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListRagCorporaRequest" }, { - "name": "tensorboard_time_series", - "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "parent", + "type": "str" }, { "name": "retry", @@ -42857,22 +47803,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", - "shortName": "update_tensorboard_time_series" + "resultType": "google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.pagers.ListRagCorporaAsyncPager", + "shortName": "list_rag_corpora" }, - "description": "Sample for UpdateTensorboardTimeSeries", - "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_async.py", + "description": "Sample for ListRagCorpora", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_corpora_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardTimeSeries_async", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_ListRagCorpora_async", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -42882,51 +47828,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_async.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_corpora_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", - "shortName": "TensorboardServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient", + "shortName": "VertexRagDataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard_time_series", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient.list_rag_corpora", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboardTimeSeries", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagCorpora", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "UpdateTensorboardTimeSeries" + "shortName": "ListRagCorpora" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardTimeSeriesRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListRagCorporaRequest" }, { - "name": "tensorboard_time_series", - "type": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "parent", + "type": "str" }, { "name": "retry", @@ -42941,22 +47883,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.TensorboardTimeSeries", - "shortName": "update_tensorboard_time_series" + "resultType": "google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.pagers.ListRagCorporaPager", + "shortName": "list_rag_corpora" }, - "description": "Sample for 
UpdateTensorboardTimeSeries", - "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_sync.py", + "description": "Sample for ListRagCorpora", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_corpora_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboardTimeSeries_sync", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_ListRagCorpora_sync", "segments": [ { - "end": 55, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 55, + "end": 52, "start": 27, "type": "SHORT" }, @@ -42966,52 +47908,48 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 49, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 52, - "start": 50, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 56, - "start": 53, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_time_series_sync.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_corpora_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", - "shortName": "TensorboardServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient", + "shortName": "VertexRagDataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.update_tensorboard", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient.list_rag_files", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagFiles", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": 
"google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "UpdateTensorboard" + "shortName": "ListRagFiles" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest" + "type": "google.cloud.aiplatform_v1beta1.types.ListRagFilesRequest" }, { - "name": "tensorboard", - "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" - }, - { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "parent", + "type": "str" }, { "name": "retry", @@ -43026,22 +47964,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation_async.AsyncOperation", - "shortName": "update_tensorboard" + "resultType": "google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.pagers.ListRagFilesAsyncPager", + "shortName": "list_rag_files" }, - "description": "Sample for UpdateTensorboard", - "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_async.py", + "description": "Sample for ListRagFiles", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_files_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboard_async", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_ListRagFiles_async", "segments": [ { - "end": 58, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 52, "start": 27, "type": "SHORT" }, @@ -43051,51 +47989,47 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 49, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_async.py" + "title": 
"aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_files_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", - "shortName": "TensorboardServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient", + "shortName": "VertexRagDataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.update_tensorboard", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient.list_rag_files", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.UpdateTensorboard", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.ListRagFiles", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "UpdateTensorboard" + "shortName": "ListRagFiles" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.UpdateTensorboardRequest" - }, - { - "name": "tensorboard", - "type": "google.cloud.aiplatform_v1beta1.types.Tensorboard" + "type": "google.cloud.aiplatform_v1beta1.types.ListRagFilesRequest" }, { - "name": "update_mask", - "type": "google.protobuf.field_mask_pb2.FieldMask" + "name": "parent", + "type": "str" }, { "name": "retry", @@ -43110,22 +48044,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.api_core.operation.Operation", - "shortName": "update_tensorboard" + "resultType": "google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.pagers.ListRagFilesPager", + "shortName": "list_rag_files" }, - "description": "Sample for UpdateTensorboard", - "file": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_sync.py", + "description": "Sample for ListRagFiles", + "file": 
"aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_files_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_UpdateTensorboard_sync", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_ListRagFiles_sync", "segments": [ { - "end": 58, + "end": 52, "start": 27, "type": "FULL" }, { - "end": 58, + "end": 52, "start": 27, "type": "SHORT" }, @@ -43135,52 +48069,56 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 48, + "end": 45, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 55, - "start": 49, + "end": 48, + "start": 46, "type": "REQUEST_EXECUTION" }, { - "end": 59, - "start": 56, + "end": 53, + "start": 49, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_update_tensorboard_sync.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_list_rag_files_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", - "shortName": "TensorboardServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient", + "shortName": "VertexRagDataServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.write_tensorboard_experiment_data", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceAsyncClient.upload_rag_file", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "WriteTensorboardExperimentData" + "shortName": "UploadRagFile" }, "parameters": [ { 
"name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UploadRagFileRequest" }, { - "name": "tensorboard_experiment", + "name": "parent", "type": "str" }, { - "name": "write_run_data_requests", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]" + "name": "rag_file", + "type": "google.cloud.aiplatform_v1beta1.types.RagFile" + }, + { + "name": "upload_rag_file_config", + "type": "google.cloud.aiplatform_v1beta1.types.UploadRagFileConfig" }, { "name": "retry", @@ -43195,22 +48133,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse", - "shortName": "write_tensorboard_experiment_data" + "resultType": "google.cloud.aiplatform_v1beta1.types.UploadRagFileResponse", + "shortName": "upload_rag_file" }, - "description": "Sample for WriteTensorboardExperimentData", - "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py", + "description": "Sample for UploadRagFile", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_upload_rag_file_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardExperimentData_async", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_UploadRagFile_async", "segments": [ { - "end": 57, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 56, "start": 27, "type": "SHORT" }, @@ -43220,51 +48158,55 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 52, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": 
"aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_async.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_upload_rag_file_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", - "shortName": "TensorboardServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient", + "shortName": "VertexRagDataServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.write_tensorboard_experiment_data", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagDataServiceClient.upload_rag_file", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardExperimentData", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService.UploadRagFile", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagDataService", + "shortName": "VertexRagDataService" }, - "shortName": "WriteTensorboardExperimentData" + "shortName": "UploadRagFile" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataRequest" + "type": "google.cloud.aiplatform_v1beta1.types.UploadRagFileRequest" }, { - "name": "tensorboard_experiment", + "name": "parent", "type": "str" }, { - "name": "write_run_data_requests", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest]" + "name": "rag_file", + "type": "google.cloud.aiplatform_v1beta1.types.RagFile" + }, + { + "name": "upload_rag_file_config", + "type": "google.cloud.aiplatform_v1beta1.types.UploadRagFileConfig" }, { "name": "retry", @@ -43279,22 +48221,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardExperimentDataResponse", - "shortName": 
"write_tensorboard_experiment_data" + "resultType": "google.cloud.aiplatform_v1beta1.types.UploadRagFileResponse", + "shortName": "upload_rag_file" }, - "description": "Sample for WriteTensorboardExperimentData", - "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py", + "description": "Sample for UploadRagFile", + "file": "aiplatform_v1beta1_generated_vertex_rag_data_service_upload_rag_file_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardExperimentData_sync", + "regionTag": "aiplatform_v1beta1_generated_VertexRagDataService_UploadRagFile_sync", "segments": [ { - "end": 57, + "end": 56, "start": 27, "type": "FULL" }, { - "end": 57, + "end": 56, "start": 27, "type": "SHORT" }, @@ -43304,52 +48246,52 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 51, + "end": 50, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 54, - "start": 52, + "end": 53, + "start": 51, "type": "REQUEST_EXECUTION" }, { - "end": 58, - "start": 55, + "end": 57, + "start": 54, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_experiment_data_sync.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_data_service_upload_rag_file_sync.py" }, { "canonical": true, "clientMethod": { "async": true, "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient", - "shortName": "TensorboardServiceAsyncClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagServiceAsyncClient", + "shortName": "VertexRagServiceAsyncClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceAsyncClient.write_tensorboard_run_data", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagServiceAsyncClient.retrieve_contexts", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData", + "fullName": 
"google.cloud.aiplatform.v1beta1.VertexRagService.RetrieveContexts", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagService", + "shortName": "VertexRagService" }, - "shortName": "WriteTensorboardRunData" + "shortName": "RetrieveContexts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest" + "type": "google.cloud.aiplatform_v1beta1.types.RetrieveContextsRequest" }, { - "name": "tensorboard_run", + "name": "parent", "type": "str" }, { - "name": "time_series_data", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]" + "name": "query", + "type": "google.cloud.aiplatform_v1beta1.types.RagQuery" }, { "name": "retry", @@ -43364,22 +48306,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse", - "shortName": "write_tensorboard_run_data" + "resultType": "google.cloud.aiplatform_v1beta1.types.RetrieveContextsResponse", + "shortName": "retrieve_contexts" }, - "description": "Sample for WriteTensorboardRunData", - "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_async.py", + "description": "Sample for RetrieveContexts", + "file": "aiplatform_v1beta1_generated_vertex_rag_service_retrieve_contexts_async.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardRunData_async", + "regionTag": "aiplatform_v1beta1_generated_VertexRagService_RetrieveContexts_async", "segments": [ { - "end": 56, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 59, "start": 27, "type": "SHORT" }, @@ -43389,51 +48331,51 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 53, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 56, 
+ "start": 54, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_async.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_service_retrieve_contexts_async.py" }, { "canonical": true, "clientMethod": { "client": { - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient", - "shortName": "TensorboardServiceClient" + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagServiceClient", + "shortName": "VertexRagServiceClient" }, - "fullName": "google.cloud.aiplatform_v1beta1.TensorboardServiceClient.write_tensorboard_run_data", + "fullName": "google.cloud.aiplatform_v1beta1.VertexRagServiceClient.retrieve_contexts", "method": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService.WriteTensorboardRunData", + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagService.RetrieveContexts", "service": { - "fullName": "google.cloud.aiplatform.v1beta1.TensorboardService", - "shortName": "TensorboardService" + "fullName": "google.cloud.aiplatform.v1beta1.VertexRagService", + "shortName": "VertexRagService" }, - "shortName": "WriteTensorboardRunData" + "shortName": "RetrieveContexts" }, "parameters": [ { "name": "request", - "type": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataRequest" + "type": "google.cloud.aiplatform_v1beta1.types.RetrieveContextsRequest" }, { - "name": "tensorboard_run", + "name": "parent", "type": "str" }, { - "name": "time_series_data", - "type": "MutableSequence[google.cloud.aiplatform_v1beta1.types.TimeSeriesData]" + "name": "query", + "type": "google.cloud.aiplatform_v1beta1.types.RagQuery" }, { "name": "retry", @@ -43448,22 +48390,22 @@ "type": "Sequence[Tuple[str, str]" } ], - "resultType": "google.cloud.aiplatform_v1beta1.types.WriteTensorboardRunDataResponse", - "shortName": "write_tensorboard_run_data" + "resultType": 
"google.cloud.aiplatform_v1beta1.types.RetrieveContextsResponse", + "shortName": "retrieve_contexts" }, - "description": "Sample for WriteTensorboardRunData", - "file": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_sync.py", + "description": "Sample for RetrieveContexts", + "file": "aiplatform_v1beta1_generated_vertex_rag_service_retrieve_contexts_sync.py", "language": "PYTHON", "origin": "API_DEFINITION", - "regionTag": "aiplatform_v1beta1_generated_TensorboardService_WriteTensorboardRunData_sync", + "regionTag": "aiplatform_v1beta1_generated_VertexRagService_RetrieveContexts_sync", "segments": [ { - "end": 56, + "end": 59, "start": 27, "type": "FULL" }, { - "end": 56, + "end": 59, "start": 27, "type": "SHORT" }, @@ -43473,22 +48415,22 @@ "type": "CLIENT_INITIALIZATION" }, { - "end": 50, + "end": 53, "start": 41, "type": "REQUEST_INITIALIZATION" }, { - "end": 53, - "start": 51, + "end": 56, + "start": 54, "type": "REQUEST_EXECUTION" }, { - "end": 57, - "start": 54, + "end": 60, + "start": 57, "type": "RESPONSE_HANDLING" } ], - "title": "aiplatform_v1beta1_generated_tensorboard_service_write_tensorboard_run_data_sync.py" + "title": "aiplatform_v1beta1_generated_vertex_rag_service_retrieve_contexts_sync.py" }, { "canonical": true, diff --git a/samples/model-builder/create_custom_job_on_persistent_resource_sample.py b/samples/model-builder/create_custom_job_on_persistent_resource_sample.py new file mode 100644 index 0000000000..5562c6ccbe --- /dev/null +++ b/samples/model-builder/create_custom_job_on_persistent_resource_sample.py @@ -0,0 +1,57 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Optional + +from google.cloud import aiplatform + + +# [START aiplatform_sdk_create_custom_job_on_persistent_resource_sample] +def create_custom_job_on_persistent_resource_sample( + project: str, + location: str, + staging_bucket: str, + display_name: str, + container_uri: str, + persistent_resource_id: str, + service_account: Optional[str] = None, +) -> None: + aiplatform.init( + project=project, location=location, staging_bucket=staging_bucket + ) + + worker_pool_specs = [{ + "machine_spec": { + "machine_type": "n1-standard-4", + "accelerator_type": "NVIDIA_TESLA_K80", + "accelerator_count": 1, + }, + "replica_count": 1, + "container_spec": { + "image_uri": container_uri, + "command": [], + "args": [], + }, + }] + + custom_job = aiplatform.CustomJob( + display_name=display_name, + worker_pool_specs=worker_pool_specs, + persistent_resource_id=persistent_resource_id, + ) + + custom_job.run(service_account=service_account) + + +# [END aiplatform_sdk_create_custom_job_on_persistent_resource_sample] diff --git a/samples/model-builder/create_custom_job_on_persistent_resource_sample_test.py b/samples/model-builder/create_custom_job_on_persistent_resource_sample_test.py new file mode 100644 index 0000000000..144ca229b2 --- /dev/null +++ b/samples/model-builder/create_custom_job_on_persistent_resource_sample_test.py @@ -0,0 +1,52 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import create_custom_job_on_persistent_resource_sample +import test_constants as constants + + +_PRESISTENT_RESOURCE_ID = "test_persistent_resource_id" + + +def test_create_custom_job_on_persistent_resource_sample( + mock_sdk_init, + mock_get_custom_job, + mock_run_custom_job, +): + create_custom_job_on_persistent_resource_sample.create_custom_job_on_persistent_resource_sample( + project=constants.PROJECT, + location=constants.LOCATION, + staging_bucket=constants.STAGING_BUCKET, + display_name=constants.DISPLAY_NAME, + container_uri=constants.CONTAINER_URI, + persistent_resource_id=_PRESISTENT_RESOURCE_ID, + service_account=constants.SERVICE_ACCOUNT, + ) + + mock_sdk_init.assert_called_once_with( + project=constants.PROJECT, + location=constants.LOCATION, + staging_bucket=constants.STAGING_BUCKET, + ) + + mock_get_custom_job.assert_called_once_with( + display_name=constants.DISPLAY_NAME, + worker_pool_specs=constants.CUSTOM_JOB_WORKER_POOL_SPECS, + persistent_resource_id=_PRESISTENT_RESOURCE_ID, + ) + + mock_run_custom_job.assert_called_once_with( + service_account=constants.SERVICE_ACCOUNT, + ) diff --git a/setup.py b/setup.py index cff5da3069..4136476719 100644 --- a/setup.py +++ b/setup.py @@ -46,7 +46,9 @@ if package.startswith("google.cloud.aiplatform.preview.vertex_ray") ] -tensorboard_extra_require = ["tensorflow >=2.3.0, <2.15.0"] +# TODO(b/333098166, b/312527978): Add python_version>3.11 when tensorflow>2.16.1 +# works for tensorboard. 
+tensorboard_extra_require = ["tensorflow >=2.3.0, <2.15.0; python_version<='3.11'"] metadata_extra_require = ["pandas >= 1.0.0", "numpy>=1.15.0"] xai_extra_require = ["tensorflow >=2.3.0, <3.0.0dev"] lit_extra_require = [ @@ -70,7 +72,8 @@ ] datasets_extra_require = [ "pyarrow >= 3.0.0, < 8.0dev; python_version<'3.11'", - "pyarrow >= 10.0.1; python_version>='3.11'", + "pyarrow >= 10.0.1; python_version=='3.11'", + "pyarrow >= 14.0.0; python_version>='3.12'", ] vizier_extra_require = [ @@ -87,7 +90,10 @@ endpoint_extra_require = ["requests >= 2.28.1"] -private_endpoints_extra_require = ["urllib3 >=1.21.1, <1.27", "requests >= 2.28.1"] +private_endpoints_extra_require = [ + "urllib3 >=1.21.1, <1.27", + "requests >= 2.28.1", +] autologging_extra_require = ["mlflow>=1.27.0,<=2.1.1"] @@ -98,9 +104,12 @@ ray_extra_require = [ # Cluster only supports 2.4.0 and 2.9.3 - "ray[default] >= 2.4, <= 2.9.3,!= 2.5.*,!= 2.6.*,!= 2.7.*,!= 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2; python_version<'3.11'", + ( + "ray[default] >= 2.4, <= 2.9.3,!= 2.5.*,!= 2.6.*,!= 2.7.*,!=" + " 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2; python_version<'3.11'" + ), # Ray Data v2.4 in Python 3.11 is broken, but got fixed in Ray v2.5. 
- "ray[default] >= 2.5, <= 2.9.3; python_version>='3.11'", + "ray[default] >= 2.5, <= 2.9.3; python_version=='3.11'", "google-cloud-bigquery-storage", "google-cloud-bigquery", "pandas >= 1.0.0, < 2.2.0", @@ -119,7 +128,10 @@ ray_testing_extra_require = ray_extra_require + [ "pytest-xdist", # ray train extras required for prediction tests - "ray[train] >= 2.4, <= 2.9.3,!= 2.5.*,!= 2.6.*,!= 2.7.*,!= 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2", + ( + "ray[train] >= 2.4, <= 2.9.3,!= 2.5.*,!= 2.6.*,!= 2.7.*,!=" + " 2.8.*,!=2.9.0,!=2.9.1,!=2.9.2" + ), # Framework version constraints copied from testing_extra_require "scikit-learn", "tensorflow", @@ -128,6 +140,22 @@ "xgboost_ray", ] +reasoning_engine_extra_require = [ + "cloudpickle >= 2.2.1, < 3.0", + "pydantic < 3", +] + +rapid_evaluation_extra_require = [ + "nest_asyncio >= 1.0.0, < 1.6.0", + "pandas >= 1.0.0, < 2.2.0", +] + +langchain_extra_require = [ + "langchain >= 0.1.13, < 0.2", + "langchain-core < 0.2", + "langchain-google-vertexai < 0.2", +] + full_extra_require = list( set( tensorboard_extra_require @@ -144,6 +172,8 @@ + autologging_extra_require + preview_extra_require + ray_extra_require + + reasoning_engine_extra_require + + rapid_evaluation_extra_require ) ) testing_extra_require = ( @@ -161,14 +191,15 @@ "pytest-xdist", "scikit-learn", # Lazy import requires > 2.12.0 - "tensorflow == 2.13.0", + "tensorflow == 2.13.0; python_version<='3.11'", + "tensorflow == 2.16.1; python_version>'3.11'", # TODO(jayceeli) torch 2.1.0 has conflict with pyfakefs, will check if # future versions fix this issue - "torch >= 2.0.0, < 2.1.0", - "xgboost", - "xgboost_ray", + "torch >= 2.0.0, < 2.1.0; python_version<='3.11'", + "torch >= 2.2.0; python_version>'3.11'", "requests-toolbelt < 1.0.0", "immutabledict", + "xgboost", ] ) @@ -194,7 +225,10 @@ platforms="Posix; MacOS X; Windows", include_package_data=True, install_requires=( - "google-api-core[grpc] >= 1.34.1, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*", 
+ ( + "google-api-core[grpc] >= 1.34.1," + " <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*" + ), "google-auth >= 2.14.1, <3.0.0dev", "proto-plus >= 1.22.0, <2.0.0dev", "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", @@ -223,6 +257,9 @@ "preview": preview_extra_require, "ray": ray_extra_require, "ray_testing": ray_testing_extra_require, + "reasoningengine": reasoning_engine_extra_require, + "rapid_evaluation": rapid_evaluation_extra_require, + "langchain": langchain_extra_require, }, python_requires=">=3.8", classifiers=[ diff --git a/testing/constraints-3.10.txt b/testing/constraints-3.10.txt index dc35519572..36f5fbbb50 100644 --- a/testing/constraints-3.10.txt +++ b/testing/constraints-3.10.txt @@ -11,4 +11,5 @@ grpcio-testing==1.34.0 mlflow==1.30.1 # Pinned to speed up installation pytest-xdist==3.3.1 # Pinned to unbreak unit tests ray==2.4.0 # Pinned until 2.9.3 is verified for Ray tests -IPython # Added to test supernova rich html buttons +ipython==8.22.2 # Pinned to unbreak TypeAliasType import error +scikit-learn!=1.4.1.post1 # Pin to unbreak test_sklearn (b/332610038) diff --git a/testing/constraints-3.11.txt b/testing/constraints-3.11.txt index a622537154..e374579f1f 100644 --- a/testing/constraints-3.11.txt +++ b/testing/constraints-3.11.txt @@ -8,3 +8,4 @@ mock==4.0.2 google-cloud-storage==2.2.1 # Increased for kfp 2.0 compatibility pytest-xdist==3.3.1 # Pinned to unbreak unit tests ray==2.5.0 # Pinned until 2.9.3 is verified for Ray tests +ipython==8.22.2 # Pinned to unbreak TypeAliasType import error diff --git a/testing/constraints-3.12.txt b/testing/constraints-3.12.txt index ed7f9aed25..e374579f1f 100644 --- a/testing/constraints-3.12.txt +++ b/testing/constraints-3.12.txt @@ -4,3 +4,8 @@ google-api-core proto-plus protobuf +mock==4.0.2 +google-cloud-storage==2.2.1 # Increased for kfp 2.0 compatibility +pytest-xdist==3.3.1 # Pinned to unbreak unit tests +ray==2.5.0 # 
Pinned until 2.9.3 is verified for Ray tests +ipython==8.22.2 # Pinned to unbreak TypeAliasType import error diff --git a/tests/system/aiplatform/e2e_base.py b/tests/system/aiplatform/e2e_base.py index cb120c6c7b..ccd31c33de 100644 --- a/tests/system/aiplatform/e2e_base.py +++ b/tests/system/aiplatform/e2e_base.py @@ -175,6 +175,9 @@ def tear_down_resources(self, shared_state: Dict[str, Any]): yield + if "resources" not in shared_state: + return + # TODO(b/218310362): Add resource deletion system tests # Bring all Endpoints to the front of the list # Ensures Models are undeployed first before we attempt deletion diff --git a/tests/system/aiplatform/test_persistent_resource.py b/tests/system/aiplatform/test_persistent_resource.py index c9bafc5927..66268d1818 100644 --- a/tests/system/aiplatform/test_persistent_resource.py +++ b/tests/system/aiplatform/test_persistent_resource.py @@ -19,13 +19,13 @@ from google.cloud import aiplatform from google.cloud.aiplatform import initializer +from google.cloud.aiplatform import persistent_resource from google.cloud.aiplatform.compat.types import ( - machine_resources_v1beta1 as gca_machine_resources, + machine_resources_v1 as gca_machine_resources, ) from google.cloud.aiplatform.compat.types import ( - persistent_resource_v1beta1 as gca_persistent_resource, + persistent_resource_v1 as gca_persistent_resource, ) -from google.cloud.aiplatform.preview import persistent_resource from tests.system.aiplatform import e2e_base import pytest diff --git a/tests/system/vertexai/test_bigframes_tensorflow.py b/tests/system/vertexai/test_bigframes_tensorflow.py index 18cb384cff..5759008cbb 100644 --- a/tests/system/vertexai/test_bigframes_tensorflow.py +++ b/tests/system/vertexai/test_bigframes_tensorflow.py @@ -64,6 +64,10 @@ class TestRemoteExecutionBigframesTensorflow(e2e_base.TestEndToEnd): _temp_prefix = "temp-vertexai-remote-execution" + # TODO(b/313893962): Re-enable after fixing the broken test. 
+ @pytest.mark.skip( + reason="Known issue for removing tensorflow from the top level imports." + ) def test_remote_execution_keras(self, shared_state): # Initialize vertexai vertexai.init( diff --git a/tests/system/vertexai/test_generative_models.py b/tests/system/vertexai/test_generative_models.py index 696edec23f..0da0690a60 100644 --- a/tests/system/vertexai/test_generative_models.py +++ b/tests/system/vertexai/test_generative_models.py @@ -121,7 +121,13 @@ async def test_generate_content_streaming_async(self): assert chunk.text def test_generate_content_with_parameters(self): - model = generative_models.GenerativeModel("gemini-pro") + model = generative_models.GenerativeModel( + "gemini-pro", + system_instruction=[ + "Talk like a pirate.", + "Don't use rude words.", + ], + ) response = model.generate_content( contents="Why is sky blue?", generation_config=generative_models.GenerationConfig( diff --git a/tests/system/vertexai/test_tensorflow.py b/tests/system/vertexai/test_tensorflow.py index af53fd2c90..9f797703da 100644 --- a/tests/system/vertexai/test_tensorflow.py +++ b/tests/system/vertexai/test_tensorflow.py @@ -62,6 +62,10 @@ class TestRemoteExecutionTensorflow(e2e_base.TestEndToEnd): _temp_prefix = "temp-vertexai-remote-execution" + # TODO(b/313893962): Re-enable after fixing the broken test. + @pytest.mark.skip( + reason="Known issue for removing tensorflow from the top level imports." 
+ ) def test_remote_execution_keras(self, shared_state): # Initialize vertexai vertexai.init( diff --git a/tests/unit/aiplatform/test_custom_job_persistent_resource.py b/tests/unit/aiplatform/test_custom_job_persistent_resource.py index 3b23c05fcd..bfdab91c02 100644 --- a/tests/unit/aiplatform/test_custom_job_persistent_resource.py +++ b/tests/unit/aiplatform/test_custom_job_persistent_resource.py @@ -20,16 +20,17 @@ from unittest.mock import patch from google.cloud import aiplatform -from google.cloud.aiplatform.compat.services import ( - job_service_client_v1beta1, +from google.cloud.aiplatform import jobs +from google.cloud.aiplatform.compat.services import job_service_client_v1 +from google.cloud.aiplatform.compat.types import ( + custom_job as gca_custom_job_compat, ) -from google.cloud.aiplatform.compat.types import custom_job_v1beta1 -from google.cloud.aiplatform.compat.types import encryption_spec_v1beta1 -from google.cloud.aiplatform.compat.types import io_v1beta1 +from google.cloud.aiplatform.compat.types import custom_job_v1 +from google.cloud.aiplatform.compat.types import encryption_spec_v1 +from google.cloud.aiplatform.compat.types import io_v1 from google.cloud.aiplatform.compat.types import ( - job_state_v1beta1 as gca_job_state_compat, + job_state_v1 as gca_job_state_compat, ) -from google.cloud.aiplatform.preview import jobs import constants as test_constants import pytest @@ -58,7 +59,7 @@ # CMEK encryption _TEST_DEFAULT_ENCRYPTION_KEY_NAME = "key_1234" -_TEST_DEFAULT_ENCRYPTION_SPEC = encryption_spec_v1beta1.EncryptionSpec( +_TEST_DEFAULT_ENCRYPTION_SPEC = encryption_spec_v1.EncryptionSpec( kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME ) @@ -75,17 +76,22 @@ _TEST_LABELS = test_constants.ProjectConstants._TEST_LABELS +_TEST_PYTHON_PACKAGE_SPEC = gca_custom_job_compat.PythonPackageSpec( + executor_image_uri=_TEST_PREBUILT_CONTAINER_IMAGE, + package_uris=[test_constants.TrainingJobConstants._TEST_OUTPUT_PYTHON_PACKAGE_PATH], + 
python_module=test_constants.TrainingJobConstants._TEST_MODULE_NAME, +) # Persistent Resource _TEST_PERSISTENT_RESOURCE_ID = "test-persistent-resource-1" -_TEST_CUSTOM_JOB_WITH_PERSISTENT_RESOURCE_PROTO = custom_job_v1beta1.CustomJob( +_TEST_CUSTOM_JOB_WITH_PERSISTENT_RESOURCE_PROTO = custom_job_v1.CustomJob( display_name=_TEST_DISPLAY_NAME, - job_spec=custom_job_v1beta1.CustomJobSpec( + job_spec=custom_job_v1.CustomJobSpec( worker_pool_specs=_TEST_WORKER_POOL_SPEC, - base_output_directory=io_v1beta1.GcsDestination( + base_output_directory=io_v1.GcsDestination( output_uri_prefix=_TEST_BASE_OUTPUT_DIR ), - scheduling=custom_job_v1beta1.Scheduling( + scheduling=custom_job_v1.Scheduling( timeout=duration_pb2.Duration(seconds=_TEST_TIMEOUT), restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, disable_retries=_TEST_DISABLE_RETRIES, @@ -108,21 +114,21 @@ def _get_custom_job_proto(state=None, name=None, error=None): @pytest.fixture -def create_preview_custom_job_mock(): +def create_custom_job_mock(): with mock.patch.object( - job_service_client_v1beta1.JobServiceClient, "create_custom_job" - ) as create_preview_custom_job_mock: - create_preview_custom_job_mock.return_value = _get_custom_job_proto( + job_service_client_v1.JobServiceClient, "create_custom_job" + ) as create_custom_job_mock: + create_custom_job_mock.return_value = _get_custom_job_proto( name=_TEST_CUSTOM_JOB_NAME, state=gca_job_state_compat.JobState.JOB_STATE_PENDING, ) - yield create_preview_custom_job_mock + yield create_custom_job_mock @pytest.fixture def get_custom_job_mock(): with patch.object( - job_service_client_v1beta1.JobServiceClient, "get_custom_job" + job_service_client_v1.JobServiceClient, "get_custom_job" ) as get_custom_job_mock: get_custom_job_mock.side_effect = [ _get_custom_job_proto( @@ -152,7 +158,7 @@ def teardown_method(self): @pytest.mark.parametrize("sync", [True, False]) def test_create_custom_job_with_persistent_resource( - self, create_preview_custom_job_mock, 
get_custom_job_mock, sync + self, create_custom_job_mock, get_custom_job_mock, sync ): aiplatform.init( @@ -188,7 +194,7 @@ def test_create_custom_job_with_persistent_resource( expected_custom_job = _get_custom_job_proto() - create_preview_custom_job_mock.assert_called_once_with( + create_custom_job_mock.assert_called_once_with( parent=_TEST_PARENT, custom_job=expected_custom_job, timeout=None, @@ -201,7 +207,7 @@ def test_create_custom_job_with_persistent_resource( assert job.network == _TEST_NETWORK def test_submit_custom_job_with_persistent_resource( - self, create_preview_custom_job_mock, get_custom_job_mock + self, create_custom_job_mock, get_custom_job_mock ): aiplatform.init( @@ -216,7 +222,6 @@ def test_submit_custom_job_with_persistent_resource( worker_pool_specs=_TEST_WORKER_POOL_SPEC, base_output_dir=_TEST_BASE_OUTPUT_DIR, labels=_TEST_LABELS, - persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, ) job.submit( @@ -226,6 +231,7 @@ def test_submit_custom_job_with_persistent_resource( restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, create_request_timeout=None, disable_retries=_TEST_DISABLE_RETRIES, + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, ) job.wait_for_resource_creation() @@ -236,7 +242,7 @@ def test_submit_custom_job_with_persistent_resource( expected_custom_job = _get_custom_job_proto() - create_preview_custom_job_mock.assert_called_once_with( + create_custom_job_mock.assert_called_once_with( parent=_TEST_PARENT, custom_job=expected_custom_job, timeout=None, @@ -247,3 +253,93 @@ def test_submit_custom_job_with_persistent_resource( job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_PENDING ) assert job.network == _TEST_NETWORK + + @pytest.mark.parametrize("sync", [True, False]) + def test_run_custom_job_with_persistent_resource( + self, create_custom_job_mock, get_custom_job_mock, sync + ): + + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + staging_bucket=_TEST_STAGING_BUCKET, + 
encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = jobs.CustomJob( + display_name=_TEST_DISPLAY_NAME, + worker_pool_specs=_TEST_WORKER_POOL_SPEC, + base_output_dir=_TEST_BASE_OUTPUT_DIR, + labels=_TEST_LABELS, + ) + + job.run( + service_account=_TEST_SERVICE_ACCOUNT, + network=_TEST_NETWORK, + timeout=_TEST_TIMEOUT, + restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, + create_request_timeout=None, + disable_retries=_TEST_DISABLE_RETRIES, + sync=sync, + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + ) + + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_CUSTOM_JOB_NAME + + job.wait() + + expected_custom_job = _get_custom_job_proto() + + create_custom_job_mock.assert_called_once_with( + parent=_TEST_PARENT, + custom_job=expected_custom_job, + timeout=None, + ) + + assert job.job_spec == expected_custom_job.job_spec + assert ( + job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED + ) + assert job.network == _TEST_NETWORK + + @pytest.mark.usefixtures("mock_python_package_to_gcs") + @pytest.mark.parametrize("sync", [True, False]) + def test_from_local_script_custom_job_with_persistent_resource( + self, create_custom_job_mock, get_custom_job_mock, sync + ): + + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + staging_bucket=_TEST_STAGING_BUCKET, + encryption_spec_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME, + ) + + job = jobs.CustomJob.from_local_script( + display_name=_TEST_DISPLAY_NAME, + script_path=test_constants.TrainingJobConstants._TEST_LOCAL_SCRIPT_FILE_NAME, + container_uri=_TEST_PREBUILT_CONTAINER_IMAGE, + base_output_dir=_TEST_BASE_OUTPUT_DIR, + labels=_TEST_LABELS, + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + ) + + assert ( + job.job_spec.worker_pool_specs[0].python_package_spec + == _TEST_PYTHON_PACKAGE_SPEC + ) + + job.run(sync=sync) + + job.wait_for_resource_creation() + + assert job.resource_name == _TEST_CUSTOM_JOB_NAME + + 
job.wait() + + assert job.job_spec.persistent_resource_id == _TEST_PERSISTENT_RESOURCE_ID + assert ( + job._gca_resource.state == gca_job_state_compat.JobState.JOB_STATE_SUCCEEDED + ) diff --git a/tests/unit/aiplatform/test_hyperparameter_tuning_job_persistent_resource.py b/tests/unit/aiplatform/test_hyperparameter_tuning_job_persistent_resource.py index 9b528c5c87..792d9c51b0 100644 --- a/tests/unit/aiplatform/test_hyperparameter_tuning_job_persistent_resource.py +++ b/tests/unit/aiplatform/test_hyperparameter_tuning_job_persistent_resource.py @@ -21,18 +21,18 @@ from google.cloud import aiplatform from google.cloud.aiplatform.compat.services import ( - job_service_client_v1beta1, + job_service_client_v1, ) from google.cloud.aiplatform import hyperparameter_tuning as hpt from google.cloud.aiplatform.compat.types import ( - custom_job_v1beta1, - encryption_spec_v1beta1, - hyperparameter_tuning_job_v1beta1, - io_v1beta1, - job_state_v1beta1 as gca_job_state_compat, - study_v1beta1 as gca_study_compat, + custom_job_v1, + encryption_spec_v1, + hyperparameter_tuning_job_v1, + io_v1, + job_state_v1 as gca_job_state_compat, + study_v1 as gca_study_compat, ) -from google.cloud.aiplatform.preview import jobs +from google.cloud.aiplatform import jobs import constants as test_constants import pytest @@ -59,7 +59,7 @@ # CMEK encryption _TEST_DEFAULT_ENCRYPTION_KEY_NAME = "key_1234" -_TEST_DEFAULT_ENCRYPTION_SPEC = encryption_spec_v1beta1.EncryptionSpec( +_TEST_DEFAULT_ENCRYPTION_SPEC = encryption_spec_v1.EncryptionSpec( kms_key_name=_TEST_DEFAULT_ENCRYPTION_KEY_NAME ) @@ -95,12 +95,12 @@ # Persistent Resource _TEST_PERSISTENT_RESOURCE_ID = "test-persistent-resource-1" -_TEST_TRIAL_JOB_SPEC = custom_job_v1beta1.CustomJobSpec( +_TEST_TRIAL_JOB_SPEC = custom_job_v1.CustomJobSpec( worker_pool_specs=test_constants.TrainingJobConstants._TEST_WORKER_POOL_SPEC, - base_output_directory=io_v1beta1.GcsDestination( + base_output_directory=io_v1.GcsDestination( 
output_uri_prefix=test_constants.TrainingJobConstants._TEST_BASE_OUTPUT_DIR ), - scheduling=custom_job_v1beta1.Scheduling( + scheduling=custom_job_v1.Scheduling( timeout=duration_pb2.Duration(seconds=_TEST_TIMEOUT), restart_job_on_worker_restart=_TEST_RESTART_JOB_ON_WORKER_RESTART, disable_retries=_TEST_DISABLE_RETRIES, @@ -110,7 +110,7 @@ persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, ) -_TEST_BASE_HYPERPARAMETER_TUNING_JOB_WITH_PERSISTENT_RESOURCE_PROTO = hyperparameter_tuning_job_v1beta1.HyperparameterTuningJob( +_TEST_BASE_HYPERPARAMETER_TUNING_JOB_WITH_PERSISTENT_RESOURCE_PROTO = hyperparameter_tuning_job_v1.HyperparameterTuningJob( display_name=_TEST_DISPLAY_NAME, study_spec=gca_study_compat.StudySpec( metrics=[ @@ -197,23 +197,23 @@ def _get_hyperparameter_tuning_job_proto(state=None, name=None, error=None): @pytest.fixture -def create_preview_hyperparameter_tuning_job_mock(): +def create_hyperparameter_tuning_job_mock(): with mock.patch.object( - job_service_client_v1beta1.JobServiceClient, "create_hyperparameter_tuning_job" - ) as create_preview_hyperparameter_tuning_job_mock: - create_preview_hyperparameter_tuning_job_mock.return_value = ( + job_service_client_v1.JobServiceClient, "create_hyperparameter_tuning_job" + ) as create_hyperparameter_tuning_job_mock: + create_hyperparameter_tuning_job_mock.return_value = ( _get_hyperparameter_tuning_job_proto( name=_TEST_HYPERPARAMETERTUNING_JOB_NAME, state=gca_job_state_compat.JobState.JOB_STATE_PENDING, ) ) - yield create_preview_hyperparameter_tuning_job_mock + yield create_hyperparameter_tuning_job_mock @pytest.fixture def get_hyperparameter_tuning_job_mock(): with patch.object( - job_service_client_v1beta1.JobServiceClient, "get_hyperparameter_tuning_job" + job_service_client_v1.JobServiceClient, "get_hyperparameter_tuning_job" ) as get_hyperparameter_tuning_job_mock: get_hyperparameter_tuning_job_mock.side_effect = [ _get_hyperparameter_tuning_job_proto( @@ -248,7 +248,7 @@ def teardown_method(self): 
@pytest.mark.parametrize("sync", [True, False]) def test_create_hyperparameter_tuning_job_with_persistent_resource( self, - create_preview_hyperparameter_tuning_job_mock, + create_hyperparameter_tuning_job_mock, get_hyperparameter_tuning_job_mock, sync, ): @@ -308,7 +308,7 @@ def test_create_hyperparameter_tuning_job_with_persistent_resource( expected_hyperparameter_tuning_job = _get_hyperparameter_tuning_job_proto() - create_preview_hyperparameter_tuning_job_mock.assert_called_once_with( + create_hyperparameter_tuning_job_mock.assert_called_once_with( parent=_TEST_PARENT, hyperparameter_tuning_job=expected_hyperparameter_tuning_job, timeout=None, diff --git a/tests/unit/aiplatform/test_persistent_resource.py b/tests/unit/aiplatform/test_persistent_resource.py index b3480a6c0a..14421e9066 100644 --- a/tests/unit/aiplatform/test_persistent_resource.py +++ b/tests/unit/aiplatform/test_persistent_resource.py @@ -22,14 +22,14 @@ from google.api_core import operation as ga_operation from google.cloud import aiplatform from google.cloud.aiplatform.compat.services import ( - persistent_resource_service_client_v1beta1, + persistent_resource_service_client_v1, ) -from google.cloud.aiplatform.compat.types import encryption_spec_v1beta1 +from google.cloud.aiplatform.compat.types import encryption_spec_v1 from google.cloud.aiplatform.compat.types import ( - persistent_resource_service_v1beta1, + persistent_resource_service_v1, ) -from google.cloud.aiplatform.compat.types import persistent_resource_v1beta1 -from google.cloud.aiplatform.preview import persistent_resource +from google.cloud.aiplatform.compat.types import persistent_resource_v1 +from google.cloud.aiplatform import persistent_resource import constants as test_constants import pytest @@ -50,7 +50,7 @@ _TEST_KEY_NAME = test_constants.TrainingJobConstants._TEST_DEFAULT_ENCRYPTION_KEY_NAME _TEST_SERVICE_ACCOUNT = test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT -_TEST_PERSISTENT_RESOURCE_PROTO = 
persistent_resource_v1beta1.PersistentResource( +_TEST_PERSISTENT_RESOURCE_PROTO = persistent_resource_v1.PersistentResource( name=_TEST_PERSISTENT_RESOURCE_ID, resource_pools=[ test_constants.PersistentResourceConstants._TEST_RESOURCE_POOL, @@ -60,7 +60,7 @@ def _get_persistent_resource_proto( state=None, name=None, error=None -) -> persistent_resource_v1beta1.PersistentResource: +) -> persistent_resource_v1.PersistentResource: persistent_resource_proto = copy.deepcopy(_TEST_PERSISTENT_RESOURCE_PROTO) persistent_resource_proto.name = name persistent_resource_proto.state = state @@ -75,49 +75,49 @@ def _get_resource_name(name=None, project=_TEST_PROJECT, location=_TEST_LOCATION @pytest.fixture -def create_preview_persistent_resource_mock(): +def create_persistent_resource_mock(): with mock.patch.object( - (persistent_resource_service_client_v1beta1.PersistentResourceServiceClient), + (persistent_resource_service_client_v1.PersistentResourceServiceClient), "create_persistent_resource", - ) as create_preview_persistent_resource_mock: + ) as create_persistent_resource_mock: create_lro = mock.Mock(ga_operation.Operation) create_lro.result.return_value = None - create_preview_persistent_resource_mock.return_value = create_lro - yield create_preview_persistent_resource_mock + create_persistent_resource_mock.return_value = create_lro + yield create_persistent_resource_mock @pytest.fixture -def get_preview_persistent_resource_mock(): +def get_persistent_resource_mock(): with mock.patch.object( - (persistent_resource_service_client_v1beta1.PersistentResourceServiceClient), + (persistent_resource_service_client_v1.PersistentResourceServiceClient), "get_persistent_resource", - ) as get_preview_persistent_resource_mock: - get_preview_persistent_resource_mock.side_effect = [ + ) as get_persistent_resource_mock: + get_persistent_resource_mock.side_effect = [ _get_persistent_resource_proto( name=_TEST_PERSISTENT_RESOURCE_ID, - 
state=(persistent_resource_v1beta1.PersistentResource.State.RUNNING), + state=(persistent_resource_v1.PersistentResource.State.RUNNING), ), ] - yield get_preview_persistent_resource_mock + yield get_persistent_resource_mock _TEST_LIST_RESOURCE_1 = _get_persistent_resource_proto( name="resource_1", - state=(persistent_resource_v1beta1.PersistentResource.State.RUNNING), + state=(persistent_resource_v1.PersistentResource.State.RUNNING), ) _TEST_LIST_RESOURCE_2 = _get_persistent_resource_proto( name="resource_2", - state=(persistent_resource_v1beta1.PersistentResource.State.PROVISIONING), + state=(persistent_resource_v1.PersistentResource.State.PROVISIONING), ) _TEST_LIST_RESOURCE_3 = _get_persistent_resource_proto( name="resource_3", - state=(persistent_resource_v1beta1.PersistentResource.State.STOPPING), + state=(persistent_resource_v1.PersistentResource.State.STOPPING), ) _TEST_LIST_RESOURCE_4 = _get_persistent_resource_proto( name="resource_4", - state=(persistent_resource_v1beta1.PersistentResource.State.ERROR), + state=(persistent_resource_v1.PersistentResource.State.ERROR), ) _TEST_PERSISTENT_RESOURCE_LIST = [ @@ -129,30 +129,28 @@ def get_preview_persistent_resource_mock(): @pytest.fixture -def list_preview_persistent_resources_mock(): +def list_persistent_resources_mock(): with mock.patch.object( - (persistent_resource_service_client_v1beta1.PersistentResourceServiceClient), + (persistent_resource_service_client_v1.PersistentResourceServiceClient), "list_persistent_resources", - ) as list_preview_persistent_resources_mock: - list_preview_persistent_resources_mock.return_value = ( - _TEST_PERSISTENT_RESOURCE_LIST - ) + ) as list_persistent_resources_mock: + list_persistent_resources_mock.return_value = _TEST_PERSISTENT_RESOURCE_LIST - yield list_preview_persistent_resources_mock + yield list_persistent_resources_mock @pytest.fixture -def delete_preview_persistent_resource_mock(): +def delete_persistent_resource_mock(): with mock.patch.object( - 
(persistent_resource_service_client_v1beta1.PersistentResourceServiceClient), + (persistent_resource_service_client_v1.PersistentResourceServiceClient), "delete_persistent_resource", - ) as delete_preview_persistent_resource_mock: + ) as delete_persistent_resource_mock: delete_lro = mock.Mock(ga_operation.Operation) delete_lro.result.return_value = ( - persistent_resource_service_v1beta1.DeletePersistentResourceRequest() + persistent_resource_service_v1.DeletePersistentResourceRequest() ) - delete_preview_persistent_resource_mock.return_value = delete_lro - yield delete_preview_persistent_resource_mock + delete_persistent_resource_mock.return_value = delete_lro + yield delete_persistent_resource_mock @pytest.mark.usefixtures("google_auth_mock") @@ -168,8 +166,8 @@ def teardown_method(self): @pytest.mark.parametrize("sync", [True, False]) def test_create_persistent_resource( self, - create_preview_persistent_resource_mock, - get_preview_persistent_resource_mock, + create_persistent_resource_mock, + get_persistent_resource_mock, sync, ): my_test_resource = persistent_resource.PersistentResource.create( @@ -194,15 +192,15 @@ def test_create_persistent_resource( ) expected_persistent_resource_arg.labels = _TEST_LABELS - create_preview_persistent_resource_mock.assert_called_once_with( + create_persistent_resource_mock.assert_called_once_with( parent=_TEST_PARENT, persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, persistent_resource=expected_persistent_resource_arg, timeout=None, ) - get_preview_persistent_resource_mock.assert_called_once() - _, mock_kwargs = get_preview_persistent_resource_mock.call_args + get_persistent_resource_mock.assert_called_once() + _, mock_kwargs = get_persistent_resource_mock.call_args assert mock_kwargs["name"] == _get_resource_name( name=_TEST_PERSISTENT_RESOURCE_ID ) @@ -210,8 +208,8 @@ def test_create_persistent_resource( @pytest.mark.parametrize("sync", [True, False]) def test_create_persistent_resource_with_network( self, - 
create_preview_persistent_resource_mock, - get_preview_persistent_resource_mock, + create_persistent_resource_mock, + get_persistent_resource_mock, sync, ): my_test_resource = persistent_resource.PersistentResource.create( @@ -234,14 +232,14 @@ def test_create_persistent_resource_with_network( expected_persistent_resource_arg.network = _TEST_NETWORK expected_persistent_resource_arg.reserved_ip_ranges = _TEST_RESERVED_IP_RANGES - create_preview_persistent_resource_mock.assert_called_once_with( + create_persistent_resource_mock.assert_called_once_with( parent=_TEST_PARENT, persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, persistent_resource=expected_persistent_resource_arg, timeout=None, ) - get_preview_persistent_resource_mock.assert_called_once() - _, mock_kwargs = get_preview_persistent_resource_mock.call_args + get_persistent_resource_mock.assert_called_once() + _, mock_kwargs = get_persistent_resource_mock.call_args assert mock_kwargs["name"] == _get_resource_name( name=_TEST_PERSISTENT_RESOURCE_ID ) @@ -249,8 +247,8 @@ def test_create_persistent_resource_with_network( @pytest.mark.parametrize("sync", [True, False]) def test_create_persistent_resource_with_kms_key( self, - create_preview_persistent_resource_mock, - get_preview_persistent_resource_mock, + create_persistent_resource_mock, + get_persistent_resource_mock, sync, ): my_test_resource = persistent_resource.PersistentResource.create( @@ -270,17 +268,17 @@ def test_create_persistent_resource_with_kms_key( ) expected_persistent_resource_arg.encryption_spec = ( - encryption_spec_v1beta1.EncryptionSpec(kms_key_name=_TEST_KEY_NAME) + encryption_spec_v1.EncryptionSpec(kms_key_name=_TEST_KEY_NAME) ) - create_preview_persistent_resource_mock.assert_called_once_with( + create_persistent_resource_mock.assert_called_once_with( parent=_TEST_PARENT, persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, persistent_resource=expected_persistent_resource_arg, timeout=None, ) - 
get_preview_persistent_resource_mock.assert_called_once() - _, mock_kwargs = get_preview_persistent_resource_mock.call_args + get_persistent_resource_mock.assert_called_once() + _, mock_kwargs = get_persistent_resource_mock.call_args assert mock_kwargs["name"] == _get_resource_name( name=_TEST_PERSISTENT_RESOURCE_ID ) @@ -288,8 +286,8 @@ def test_create_persistent_resource_with_kms_key( @pytest.mark.parametrize("sync", [True, False]) def test_create_persistent_resource_with_service_account( self, - create_preview_persistent_resource_mock, - get_preview_persistent_resource_mock, + create_persistent_resource_mock, + get_persistent_resource_mock, sync, ): my_test_resource = persistent_resource.PersistentResource.create( @@ -308,31 +306,31 @@ def test_create_persistent_resource_with_service_account( name=_TEST_PERSISTENT_RESOURCE_ID, ) - service_account_spec = persistent_resource_v1beta1.ServiceAccountSpec( + service_account_spec = persistent_resource_v1.ServiceAccountSpec( enable_custom_service_account=True, service_account=_TEST_SERVICE_ACCOUNT ) expected_persistent_resource_arg.resource_runtime_spec = ( - persistent_resource_v1beta1.ResourceRuntimeSpec( + persistent_resource_v1.ResourceRuntimeSpec( service_account_spec=service_account_spec ) ) - create_preview_persistent_resource_mock.assert_called_once_with( + create_persistent_resource_mock.assert_called_once_with( parent=_TEST_PARENT, persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, persistent_resource=expected_persistent_resource_arg, timeout=None, ) - get_preview_persistent_resource_mock.assert_called_once() - _, mock_kwargs = get_preview_persistent_resource_mock.call_args + get_persistent_resource_mock.assert_called_once() + _, mock_kwargs = get_persistent_resource_mock.call_args assert mock_kwargs["name"] == _get_resource_name( name=_TEST_PERSISTENT_RESOURCE_ID ) - def test_list_persistent_resources(self, list_preview_persistent_resources_mock): + def test_list_persistent_resources(self, 
list_persistent_resources_mock): resource_list = persistent_resource.PersistentResource.list() - list_preview_persistent_resources_mock.assert_called_once() + list_persistent_resources_mock.assert_called_once() assert len(resource_list) == len(_TEST_PERSISTENT_RESOURCE_LIST) for i in range(len(resource_list)): @@ -345,8 +343,8 @@ def test_list_persistent_resources(self, list_preview_persistent_resources_mock) @pytest.mark.parametrize("sync", [True, False]) def test_delete_persistent_resource( self, - get_preview_persistent_resource_mock, - delete_preview_persistent_resource_mock, + get_persistent_resource_mock, + delete_persistent_resource_mock, sync, ): test_resource = persistent_resource.PersistentResource( @@ -357,7 +355,7 @@ def test_delete_persistent_resource( if not sync: test_resource.wait() - get_preview_persistent_resource_mock.assert_called_once() - delete_preview_persistent_resource_mock.assert_called_once_with( + get_persistent_resource_mock.assert_called_once() + delete_persistent_resource_mock.assert_called_once_with( name=_TEST_PERSISTENT_RESOURCE_ID, ) diff --git a/tests/unit/aiplatform/test_persistent_resource_preview.py b/tests/unit/aiplatform/test_persistent_resource_preview.py new file mode 100644 index 0000000000..f189af2a8b --- /dev/null +++ b/tests/unit/aiplatform/test_persistent_resource_preview.py @@ -0,0 +1,368 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import copy +import importlib +from unittest import mock + +from google.api_core import operation as ga_operation +from google.cloud import aiplatform +from google.cloud.aiplatform.compat.services import ( + persistent_resource_service_client_v1, +) +from google.cloud.aiplatform.compat.services import ( + persistent_resource_service_client_v1beta1 as persistent_resource_service_client_compat, +) +from google.cloud.aiplatform.compat.types import ( + encryption_spec_v1beta1 as encryption_spec_compat, +) +from google.cloud.aiplatform.compat.types import ( + persistent_resource_service_v1beta1 as persistent_resource_service_compat, +) +from google.cloud.aiplatform.compat.types import ( + persistent_resource_v1beta1 as persistent_resource_compat, +) +from google.cloud.aiplatform.preview import persistent_resource +import constants as test_constants +import pytest + + +_TEST_PROJECT = test_constants.ProjectConstants._TEST_PROJECT +_TEST_LOCATION = test_constants.ProjectConstants._TEST_LOCATION +_TEST_PARENT = test_constants.ProjectConstants._TEST_PARENT + +_TEST_PERSISTENT_RESOURCE_ID = ( + test_constants.PersistentResourceConstants._TEST_PERSISTENT_RESOURCE_ID +) +_TEST_PERSISTENT_RESOURCE_DISPLAY_NAME = ( + test_constants.PersistentResourceConstants._TEST_PERSISTENT_RESOURCE_DISPLAY_NAME +) +_TEST_LABELS = test_constants.ProjectConstants._TEST_LABELS +_TEST_NETWORK = test_constants.TrainingJobConstants._TEST_NETWORK +_TEST_RESERVED_IP_RANGES = test_constants.TrainingJobConstants._TEST_RESERVED_IP_RANGES +_TEST_KEY_NAME = test_constants.TrainingJobConstants._TEST_DEFAULT_ENCRYPTION_KEY_NAME +_TEST_SERVICE_ACCOUNT = test_constants.ProjectConstants._TEST_SERVICE_ACCOUNT + +_TEST_PERSISTENT_RESOURCE_PROTO = persistent_resource_compat.PersistentResource( + name=_TEST_PERSISTENT_RESOURCE_ID, + resource_pools=[ + test_constants.PersistentResourceConstants._TEST_RESOURCE_POOL, + ], +) + + +def _get_persistent_resource_proto( + state=None, name=None, error=None +) -> 
persistent_resource_compat.PersistentResource: + persistent_resource_proto = copy.deepcopy(_TEST_PERSISTENT_RESOURCE_PROTO) + persistent_resource_proto.name = name + persistent_resource_proto.state = state + persistent_resource_proto.error = error + return persistent_resource_proto + + +def _get_resource_name(name=None, project=_TEST_PROJECT, location=_TEST_LOCATION): + return "projects/{}/locations/{}/persistentResources/{}".format( + project, location, name + ) + + +@pytest.fixture +def create_preview_persistent_resource_mock(): + with mock.patch.object( + (persistent_resource_service_client_compat.PersistentResourceServiceClient), + "create_persistent_resource", + ) as create_persistent_resource_mock: + create_lro = mock.Mock(ga_operation.Operation) + create_lro.result.return_value = None + + create_persistent_resource_mock.return_value = create_lro + yield create_persistent_resource_mock + + +@pytest.fixture +def get_persistent_resource_mock(): + with mock.patch.object( + (persistent_resource_service_client_v1.PersistentResourceServiceClient), + "get_persistent_resource", + ) as get_persistent_resource_mock: + get_persistent_resource_mock.side_effect = [ + _get_persistent_resource_proto( + name=_TEST_PERSISTENT_RESOURCE_ID, + state=(persistent_resource_compat.PersistentResource.State.RUNNING), + ), + ] + + yield get_persistent_resource_mock + + +_TEST_LIST_RESOURCE_1 = _get_persistent_resource_proto( + name="resource_1", + state=(persistent_resource_compat.PersistentResource.State.RUNNING), +) +_TEST_LIST_RESOURCE_2 = _get_persistent_resource_proto( + name="resource_2", + state=(persistent_resource_compat.PersistentResource.State.PROVISIONING), +) +_TEST_LIST_RESOURCE_3 = _get_persistent_resource_proto( + name="resource_3", + state=(persistent_resource_compat.PersistentResource.State.STOPPING), +) +_TEST_LIST_RESOURCE_4 = _get_persistent_resource_proto( + name="resource_4", + state=(persistent_resource_compat.PersistentResource.State.ERROR), +) + 
+_TEST_PERSISTENT_RESOURCE_LIST = [ + _TEST_LIST_RESOURCE_1, + _TEST_LIST_RESOURCE_2, + _TEST_LIST_RESOURCE_3, + _TEST_LIST_RESOURCE_4, +] + + +@pytest.fixture +def list_persistent_resources_mock(): + with mock.patch.object( + (persistent_resource_service_client_v1.PersistentResourceServiceClient), + "list_persistent_resources", + ) as list_persistent_resources_mock: + list_persistent_resources_mock.return_value = _TEST_PERSISTENT_RESOURCE_LIST + + yield list_persistent_resources_mock + + +@pytest.fixture +def delete_persistent_resource_mock(): + with mock.patch.object( + (persistent_resource_service_client_v1.PersistentResourceServiceClient), + "delete_persistent_resource", + ) as delete_persistent_resource_mock: + delete_lro = mock.Mock(ga_operation.Operation) + delete_lro.result.return_value = ( + persistent_resource_service_compat.DeletePersistentResourceRequest() + ) + delete_persistent_resource_mock.return_value = delete_lro + yield delete_persistent_resource_mock + + +@pytest.mark.usefixtures("google_auth_mock") +class TestPersistentResourcePreview: + def setup_method(self): + importlib.reload(aiplatform.initializer) + importlib.reload(aiplatform) + aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION) + + def teardown_method(self): + aiplatform.initializer.global_pool.shutdown(wait=True) + + @pytest.mark.parametrize("sync", [True, False]) + def test_create_persistent_resource( + self, + create_preview_persistent_resource_mock, + get_persistent_resource_mock, + sync, + ): + my_test_resource = persistent_resource.PersistentResource.create( + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + display_name=_TEST_PERSISTENT_RESOURCE_DISPLAY_NAME, + resource_pools=[ + test_constants.PersistentResourceConstants._TEST_RESOURCE_POOL, + ], + labels=_TEST_LABELS, + sync=sync, + ) + + if not sync: + my_test_resource.wait() + + expected_persistent_resource_arg = _get_persistent_resource_proto( + name=_TEST_PERSISTENT_RESOURCE_ID, + ) + + 
expected_persistent_resource_arg.display_name = ( + _TEST_PERSISTENT_RESOURCE_DISPLAY_NAME + ) + expected_persistent_resource_arg.labels = _TEST_LABELS + + create_preview_persistent_resource_mock.assert_called_once_with( + parent=_TEST_PARENT, + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + persistent_resource=expected_persistent_resource_arg, + timeout=None, + ) + + get_persistent_resource_mock.assert_called_once() + _, mock_kwargs = get_persistent_resource_mock.call_args + assert mock_kwargs["name"] == _get_resource_name( + name=_TEST_PERSISTENT_RESOURCE_ID + ) + + @pytest.mark.parametrize("sync", [True, False]) + def test_create_persistent_resource_with_network( + self, + create_preview_persistent_resource_mock, + get_persistent_resource_mock, + sync, + ): + my_test_resource = persistent_resource.PersistentResource.create( + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + resource_pools=[ + test_constants.PersistentResourceConstants._TEST_RESOURCE_POOL, + ], + network=_TEST_NETWORK, + reserved_ip_ranges=_TEST_RESERVED_IP_RANGES, + sync=sync, + ) + + if not sync: + my_test_resource.wait() + + expected_persistent_resource_arg = _get_persistent_resource_proto( + name=_TEST_PERSISTENT_RESOURCE_ID, + ) + + expected_persistent_resource_arg.network = _TEST_NETWORK + expected_persistent_resource_arg.reserved_ip_ranges = _TEST_RESERVED_IP_RANGES + + create_preview_persistent_resource_mock.assert_called_once_with( + parent=_TEST_PARENT, + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + persistent_resource=expected_persistent_resource_arg, + timeout=None, + ) + get_persistent_resource_mock.assert_called_once() + _, mock_kwargs = get_persistent_resource_mock.call_args + assert mock_kwargs["name"] == _get_resource_name( + name=_TEST_PERSISTENT_RESOURCE_ID + ) + + @pytest.mark.parametrize("sync", [True, False]) + def test_create_persistent_resource_with_kms_key( + self, + create_preview_persistent_resource_mock, + get_persistent_resource_mock, + sync, + 
): + my_test_resource = persistent_resource.PersistentResource.create( + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + resource_pools=[ + test_constants.PersistentResourceConstants._TEST_RESOURCE_POOL, + ], + kms_key_name=_TEST_KEY_NAME, + sync=sync, + ) + + if not sync: + my_test_resource.wait() + + expected_persistent_resource_arg = _get_persistent_resource_proto( + name=_TEST_PERSISTENT_RESOURCE_ID, + ) + + expected_persistent_resource_arg.encryption_spec = ( + encryption_spec_compat.EncryptionSpec(kms_key_name=_TEST_KEY_NAME) + ) + + create_preview_persistent_resource_mock.assert_called_once_with( + parent=_TEST_PARENT, + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + persistent_resource=expected_persistent_resource_arg, + timeout=None, + ) + get_persistent_resource_mock.assert_called_once() + _, mock_kwargs = get_persistent_resource_mock.call_args + assert mock_kwargs["name"] == _get_resource_name( + name=_TEST_PERSISTENT_RESOURCE_ID + ) + + @pytest.mark.parametrize("sync", [True, False]) + def test_create_persistent_resource_with_service_account( + self, + create_preview_persistent_resource_mock, + get_persistent_resource_mock, + sync, + ): + my_test_resource = persistent_resource.PersistentResource.create( + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + resource_pools=[ + test_constants.PersistentResourceConstants._TEST_RESOURCE_POOL, + ], + service_account=_TEST_SERVICE_ACCOUNT, + sync=sync, + ) + + if not sync: + my_test_resource.wait() + + expected_persistent_resource_arg = _get_persistent_resource_proto( + name=_TEST_PERSISTENT_RESOURCE_ID, + ) + + service_account_spec = persistent_resource_compat.ServiceAccountSpec( + enable_custom_service_account=True, service_account=_TEST_SERVICE_ACCOUNT + ) + expected_persistent_resource_arg.resource_runtime_spec = ( + persistent_resource_compat.ResourceRuntimeSpec( + service_account_spec=service_account_spec + ) + ) + + create_preview_persistent_resource_mock.assert_called_once_with( + 
parent=_TEST_PARENT, + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + persistent_resource=expected_persistent_resource_arg, + timeout=None, + ) + get_persistent_resource_mock.assert_called_once() + _, mock_kwargs = get_persistent_resource_mock.call_args + assert mock_kwargs["name"] == _get_resource_name( + name=_TEST_PERSISTENT_RESOURCE_ID + ) + + def test_list_persistent_resources(self, list_persistent_resources_mock): + resource_list = persistent_resource.PersistentResource.list() + + list_persistent_resources_mock.assert_called_once() + assert len(resource_list) == len(_TEST_PERSISTENT_RESOURCE_LIST) + + for i in range(len(resource_list)): + actual_resource = resource_list[i] + expected_resource = _TEST_PERSISTENT_RESOURCE_LIST[i] + + assert actual_resource.name == expected_resource.name + assert actual_resource.state == expected_resource.state + + @pytest.mark.parametrize("sync", [True, False]) + def test_delete_persistent_resource( + self, + get_persistent_resource_mock, + delete_persistent_resource_mock, + sync, + ): + test_resource = persistent_resource.PersistentResource( + _TEST_PERSISTENT_RESOURCE_ID + ) + test_resource.delete(sync=sync) + + if not sync: + test_resource.wait() + + get_persistent_resource_mock.assert_called_once() + delete_persistent_resource_mock.assert_called_once_with( + name=_TEST_PERSISTENT_RESOURCE_ID, + ) diff --git a/tests/unit/aiplatform/test_training_jobs.py b/tests/unit/aiplatform/test_training_jobs.py index eb807116f9..a22e8c07f7 100644 --- a/tests/unit/aiplatform/test_training_jobs.py +++ b/tests/unit/aiplatform/test_training_jobs.py @@ -238,6 +238,9 @@ _TEST_ENABLE_DASHBOARD_ACCESS = True _TEST_WEB_ACCESS_URIS = test_constants.TrainingJobConstants._TEST_WEB_ACCESS_URIS _TEST_DASHBOARD_ACCESS_URIS = {"workerpool0-0:8888": "uri"} +_TEST_PERSISTENT_RESOURCE_ID = ( + test_constants.PersistentResourceConstants._TEST_PERSISTENT_RESOURCE_ID +) _TEST_BASE_CUSTOM_JOB_PROTO = gca_custom_job.CustomJob( 
job_spec=gca_custom_job.CustomJobSpec(), @@ -268,6 +271,17 @@ def _get_custom_job_proto_with_enable_dashboard_access( return custom_job_proto +def _get_custom_job_proto_with_persistent_resource_id( + state=None, name=None, version="v1" +): + custom_job_proto = copy.deepcopy(_TEST_BASE_CUSTOM_JOB_PROTO) + custom_job_proto.name = name + custom_job_proto.state = state + custom_job_proto.job_spec.persistent_resource_id = _TEST_PERSISTENT_RESOURCE_ID + + return custom_job_proto + + def _get_custom_job_proto_with_scheduling(state=None, name=None, version="v1"): custom_job_proto = copy.deepcopy(_TEST_BASE_CUSTOM_JOB_PROTO) custom_job_proto.name = name @@ -428,6 +442,40 @@ def mock_get_backing_custom_job_with_enable_dashboard_access(): yield get_custom_job_mock +@pytest.fixture +def mock_get_backing_custom_job_with_persistent_resource_id(): + with patch.object( + job_service_client.JobServiceClient, "get_custom_job" + ) as get_custom_job_mock: + get_custom_job_mock.side_effect = [ + _get_custom_job_proto_with_persistent_resource_id( + name=_TEST_CUSTOM_JOB_RESOURCE_NAME, + state=gca_job_state.JobState.JOB_STATE_PENDING, + ), + _get_custom_job_proto_with_persistent_resource_id( + name=_TEST_CUSTOM_JOB_RESOURCE_NAME, + state=gca_job_state.JobState.JOB_STATE_RUNNING, + ), + _get_custom_job_proto_with_persistent_resource_id( + name=_TEST_CUSTOM_JOB_RESOURCE_NAME, + state=gca_job_state.JobState.JOB_STATE_RUNNING, + ), + _get_custom_job_proto_with_persistent_resource_id( + name=_TEST_CUSTOM_JOB_RESOURCE_NAME, + state=gca_job_state.JobState.JOB_STATE_RUNNING, + ), + _get_custom_job_proto_with_persistent_resource_id( + name=_TEST_CUSTOM_JOB_RESOURCE_NAME, + state=gca_job_state.JobState.JOB_STATE_SUCCEEDED, + ), + _get_custom_job_proto_with_persistent_resource_id( + name=_TEST_CUSTOM_JOB_RESOURCE_NAME, + state=gca_job_state.JobState.JOB_STATE_SUCCEEDED, + ), + ] + yield get_custom_job_mock + + @pytest.mark.skipif( sys.executable is None, reason="requires python path to invoke 
subprocess" ) @@ -725,6 +773,19 @@ def make_training_pipeline_with_enable_dashboard_access(state): return training_pipeline +def make_training_pipeline_with_persistent_resource_id(state): + training_pipeline = gca_training_pipeline.TrainingPipeline( + name=_TEST_PIPELINE_RESOURCE_NAME, + state=state, + training_task_inputs={"persistent_resource_id": _TEST_PERSISTENT_RESOURCE_ID}, + ) + if state == gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING: + training_pipeline.training_task_metadata = { + "backingCustomJob": _TEST_CUSTOM_JOB_RESOURCE_NAME + } + return training_pipeline + + def make_training_pipeline_with_scheduling(state): training_pipeline = gca_training_pipeline.TrainingPipeline( name=_TEST_PIPELINE_RESOURCE_NAME, @@ -826,6 +887,35 @@ def mock_pipeline_service_get_with_enable_dashboard_access(): yield mock_get_training_pipeline +@pytest.fixture +def mock_pipeline_service_get_with_persistent_resource_id(): + with mock.patch.object( + pipeline_service_client.PipelineServiceClient, "get_training_pipeline" + ) as mock_get_training_pipeline: + mock_get_training_pipeline.side_effect = [ + make_training_pipeline_with_persistent_resource_id( + state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING, + ), + make_training_pipeline_with_persistent_resource_id( + state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING, + ), + make_training_pipeline_with_persistent_resource_id( + state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING, + ), + make_training_pipeline_with_persistent_resource_id( + state=gca_pipeline_state.PipelineState.PIPELINE_STATE_RUNNING, + ), + make_training_pipeline_with_persistent_resource_id( + state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED, + ), + make_training_pipeline_with_persistent_resource_id( + state=gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED, + ), + ] + + yield mock_get_training_pipeline + + @pytest.fixture def mock_pipeline_service_get_with_scheduling(): with mock.patch.object( @@ 
-903,6 +993,19 @@ def mock_pipeline_service_create_with_enable_dashboard_access(): yield mock_create_training_pipeline +@pytest.fixture +def mock_pipeline_service_create_with_persistent_resource_id(): + with mock.patch.object( + pipeline_service_client.PipelineServiceClient, "create_training_pipeline" + ) as mock_create_training_pipeline: + mock_create_training_pipeline.return_value = ( + make_training_pipeline_with_persistent_resource_id( + state=gca_pipeline_state.PipelineState.PIPELINE_STATE_PENDING, + ) + ) + yield mock_create_training_pipeline + + @pytest.fixture def mock_pipeline_service_create_with_scheduling(): with mock.patch.object( @@ -3101,6 +3204,51 @@ def test_cancel_training_job_without_running(self, mock_pipeline_service_cancel) assert e.match(regexp=r"TrainingJob has not been launched") + @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1) + @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1) + @pytest.mark.usefixtures( + "mock_pipeline_service_create_with_persistent_resource_id", + "mock_pipeline_service_get_with_persistent_resource_id", + "mock_get_backing_custom_job_with_persistent_resource_id", + "mock_python_package_to_gcs", + ) + @pytest.mark.parametrize("sync", [True, False]) + def test_run_call_pipeline_service_create_with_persistent_resource_id( + self, sync, caplog + ): + + caplog.set_level(logging.INFO) + + aiplatform.init( + project=_TEST_PROJECT, + staging_bucket=_TEST_BUCKET_NAME, + credentials=_TEST_CREDENTIALS, + ) + + job = training_jobs.CustomTrainingJob( + display_name=_TEST_DISPLAY_NAME, + script_path=_TEST_LOCAL_SCRIPT_FILE_NAME, + container_uri=_TEST_TRAINING_CONTAINER_IMAGE, + ) + + job.run( + base_output_dir=_TEST_BASE_OUTPUT_DIR, + args=_TEST_RUN_ARGS, + machine_type=_TEST_MACHINE_TYPE, + accelerator_type=_TEST_ACCELERATOR_TYPE, + accelerator_count=_TEST_ACCELERATOR_COUNT, + sync=sync, + create_request_timeout=None, + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + ) + + if not sync: + job.wait() + + assert 
job._gca_resource == make_training_pipeline_with_persistent_resource_id( + gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED + ) + @pytest.mark.usefixtures("google_auth_mock") class TestCustomContainerTrainingJob: @@ -4898,6 +5046,51 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset_raises_if_anno create_request_timeout=None, ) + @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1) + @mock.patch.object(training_jobs, "_LOG_WAIT_TIME", 1) + @pytest.mark.usefixtures( + "mock_pipeline_service_create_with_persistent_resource_id", + "mock_pipeline_service_get_with_persistent_resource_id", + "mock_get_backing_custom_job_with_persistent_resource_id", + ) + @pytest.mark.parametrize("sync", [True, False]) + def test_run_call_pipeline_service_create_with_persistent_resource_id( + self, sync, caplog + ): + + caplog.set_level(logging.INFO) + + aiplatform.init( + project=_TEST_PROJECT, + staging_bucket=_TEST_BUCKET_NAME, + credentials=_TEST_CREDENTIALS, + ) + + job = training_jobs.CustomContainerTrainingJob( + display_name=_TEST_DISPLAY_NAME, + container_uri=_TEST_TRAINING_CONTAINER_IMAGE, + command=_TEST_TRAINING_CONTAINER_CMD, + ) + + job.run( + base_output_dir=_TEST_BASE_OUTPUT_DIR, + args=_TEST_RUN_ARGS, + machine_type=_TEST_MACHINE_TYPE, + accelerator_type=_TEST_ACCELERATOR_TYPE, + accelerator_count=_TEST_ACCELERATOR_COUNT, + sync=sync, + create_request_timeout=None, + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + ) + + if not sync: + job.wait() + + print(caplog.text) + assert job._gca_resource == make_training_pipeline_with_persistent_resource_id( + gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED + ) + class Test_WorkerPoolSpec: def test_machine_spec_return_spec_dict(self): @@ -7192,6 +7385,52 @@ def test_run_call_pipeline_service_create_with_nontabular_dataset_raises_if_anno model_display_name=_TEST_MODEL_DISPLAY_NAME, ) + @mock.patch.object(training_jobs, "_JOB_WAIT_TIME", 1) + @mock.patch.object(training_jobs, 
"_LOG_WAIT_TIME", 1) + @pytest.mark.usefixtures( + "mock_pipeline_service_create_with_persistent_resource_id", + "mock_pipeline_service_get_with_persistent_resource_id", + "mock_get_backing_custom_job_with_persistent_resource_id", + ) + @pytest.mark.parametrize("sync", [True, False]) + def test_run_call_pipeline_service_create_with_persistent_resource_id( + self, sync, caplog + ): + + caplog.set_level(logging.INFO) + + aiplatform.init( + project=_TEST_PROJECT, + staging_bucket=_TEST_BUCKET_NAME, + credentials=_TEST_CREDENTIALS, + ) + + job = training_jobs.CustomPythonPackageTrainingJob( + display_name=_TEST_DISPLAY_NAME, + python_package_gcs_uri=_TEST_OUTPUT_PYTHON_PACKAGE_PATH, + python_module_name=_TEST_PYTHON_MODULE_NAME, + container_uri=_TEST_TRAINING_CONTAINER_IMAGE, + ) + + job.run( + base_output_dir=_TEST_BASE_OUTPUT_DIR, + args=_TEST_RUN_ARGS, + machine_type=_TEST_MACHINE_TYPE, + accelerator_type=_TEST_ACCELERATOR_TYPE, + accelerator_count=_TEST_ACCELERATOR_COUNT, + sync=sync, + create_request_timeout=None, + persistent_resource_id=_TEST_PERSISTENT_RESOURCE_ID, + ) + + if not sync: + job.wait() + + print(caplog.text) + assert job._gca_resource == make_training_pipeline_with_persistent_resource_id( + gca_pipeline_state.PipelineState.PIPELINE_STATE_SUCCEEDED + ) + class TestVersionedTrainingJobs: @pytest.mark.usefixtures("mock_pipeline_service_get") diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py new file mode 100644 index 0000000000..52c48eab19 --- /dev/null +++ b/tests/unit/conftest.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- + +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +import sys + +collect_ignore = [] + +if sys.version_info >= (3, 12): + collect_ignore = [ + "aiplatform/test_cloud_profiler.py", + "aiplatform/test_uploader.py", + "aiplatform/test_uploader_main.py", + "aiplatform/test_autologging.py", + "aiplatform/test_explain_lit.py", + "aiplatform/test_explain_saved_model_metadata_builder_tf2_test.py", + "aiplatform/test_metadata_models.py", + "aiplatform/test_endpoints.py", + "aiplatform/test_model_evaluation.py", + "aiplatform/test_models.py", + "aiplatform/test_utils.py", + "vertexai/test_reasoning_engines.py", + "vertexai/test_model_utils.py", + "vertexai/test_remote_training.py", + "vertexai/test_serializers.py", + "vertexai/test_vizier_hyperparameter_tuner.py", + ] diff --git a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py index 9263d3e3ae..54f1f3f70b 100644 --- a/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_endpoint_service.py @@ -3919,6 +3919,7 @@ def test_create_endpoint_rest(request_type): "latent_space_source": "latent_space_source_value", }, }, + "disable_explanations": True, "service_account": "service_account_value", "disable_container_logging": True, "enable_access_logging": True, @@ -4988,6 +4989,7 @@ def test_update_endpoint_rest(request_type): "latent_space_source": "latent_space_source_value", }, }, + "disable_explanations": True, "service_account": "service_account_value", "disable_container_logging": True, "enable_access_logging": True, diff --git 
a/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py b/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py new file mode 100644 index 0000000000..e1bb44ac27 --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1/test_gen_ai_tuning_service.py @@ -0,0 +1,6850 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from google.api_core import api_core_version +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from 
google.cloud.aiplatform_v1.services.gen_ai_tuning_service import ( + GenAiTuningServiceAsyncClient, +) +from google.cloud.aiplatform_v1.services.gen_ai_tuning_service import ( + GenAiTuningServiceClient, +) +from google.cloud.aiplatform_v1.services.gen_ai_tuning_service import pagers +from google.cloud.aiplatform_v1.services.gen_ai_tuning_service import transports +from google.cloud.aiplatform_v1.types import content +from google.cloud.aiplatform_v1.types import genai_tuning_service +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import tool +from google.cloud.aiplatform_v1.types import tuning_job +from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +# If default endpoint template is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert GenAiTuningServiceClient._get_default_mtls_endpoint(None) is None + assert ( + GenAiTuningServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + GenAiTuningServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + GenAiTuningServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + GenAiTuningServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + GenAiTuningServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +def test__read_environment_variables(): + assert GenAiTuningServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert GenAiTuningServiceClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert GenAiTuningServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + GenAiTuningServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == 
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert GenAiTuningServiceClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert GenAiTuningServiceClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert GenAiTuningServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + GenAiTuningServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert GenAiTuningServiceClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert GenAiTuningServiceClient._get_client_cert_source(None, False) is None + assert ( + GenAiTuningServiceClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + GenAiTuningServiceClient._get_client_cert_source( + mock_provided_cert_source, True + ) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + GenAiTuningServiceClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + 
GenAiTuningServiceClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + GenAiTuningServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(GenAiTuningServiceClient), +) +@mock.patch.object( + GenAiTuningServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(GenAiTuningServiceAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = GenAiTuningServiceClient._DEFAULT_UNIVERSE + default_endpoint = GenAiTuningServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = GenAiTuningServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + GenAiTuningServiceClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + GenAiTuningServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == GenAiTuningServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + GenAiTuningServiceClient._get_api_endpoint(None, None, default_universe, "auto") + == default_endpoint + ) + assert ( + GenAiTuningServiceClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == GenAiTuningServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + GenAiTuningServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == GenAiTuningServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + GenAiTuningServiceClient._get_api_endpoint(None, None, mock_universe, "never") + == mock_endpoint + ) + assert ( + GenAiTuningServiceClient._get_api_endpoint( + None, None, default_universe, "never" + ) + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + GenAiTuningServiceClient._get_api_endpoint( + None, 
mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + GenAiTuningServiceClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + GenAiTuningServiceClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + GenAiTuningServiceClient._get_universe_domain(None, None) + == GenAiTuningServiceClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + GenAiTuningServiceClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (GenAiTuningServiceClient, transports.GenAiTuningServiceGrpcTransport, "grpc"), + (GenAiTuningServiceClient, transports.GenAiTuningServiceRestTransport, "rest"), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. 
Validation should still succeed because there is no + # mismatch with non-existent credentials. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. + google_auth_major, google_auth_minor = [ + int(part) for part in google.auth.__version__.split(".")[0:2] + ] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. + client = client_class(transport=transport_class(credentials=credentials)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. 
+ api_core_major, api_core_minor = [ + int(part) for part in api_core_version.__version__.split(".")[0:2] + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (GenAiTuningServiceClient, "grpc"), + (GenAiTuningServiceAsyncClient, "grpc_asyncio"), + (GenAiTuningServiceClient, "rest"), + ], +) +def test_gen_ai_tuning_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.GenAiTuningServiceGrpcTransport, "grpc"), + (transports.GenAiTuningServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.GenAiTuningServiceRestTransport, "rest"), + ], +) +def 
test_gen_ai_tuning_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (GenAiTuningServiceClient, "grpc"), + (GenAiTuningServiceAsyncClient, "grpc_asyncio"), + (GenAiTuningServiceClient, "rest"), + ], +) +def test_gen_ai_tuning_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +def test_gen_ai_tuning_service_client_get_transport_class(): + transport = GenAiTuningServiceClient.get_transport_class() + available_transports = [ + transports.GenAiTuningServiceGrpcTransport, + transports.GenAiTuningServiceRestTransport, + ] + assert transport in 
available_transports + + transport = GenAiTuningServiceClient.get_transport_class("grpc") + assert transport == transports.GenAiTuningServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (GenAiTuningServiceClient, transports.GenAiTuningServiceGrpcTransport, "grpc"), + ( + GenAiTuningServiceAsyncClient, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (GenAiTuningServiceClient, transports.GenAiTuningServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + GenAiTuningServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(GenAiTuningServiceClient), +) +@mock.patch.object( + GenAiTuningServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(GenAiTuningServiceAsyncClient), +) +def test_gen_ai_tuning_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(GenAiTuningServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(GenAiTuningServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+ )
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError) as excinfo:
+ client = client_class(transport=transport_name)
+ assert (
+ str(excinfo.value)
+ == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+ )
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
+ # Check the case api_audience is provided
+ options = client_options.ClientOptions(
+ api_audience="https://language.googleapis.com"
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options, transport=transport_name)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client._DEFAULT_ENDPOINT_TEMPLATE.format(
+ UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE
+ ),
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ 
always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + GenAiTuningServiceClient, + transports.GenAiTuningServiceGrpcTransport, + "grpc", + "true", + ), + ( + GenAiTuningServiceAsyncClient, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + GenAiTuningServiceClient, + transports.GenAiTuningServiceGrpcTransport, + "grpc", + "false", + ), + ( + GenAiTuningServiceAsyncClient, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + GenAiTuningServiceClient, + transports.GenAiTuningServiceRestTransport, + "rest", + "true", + ), + ( + GenAiTuningServiceClient, + transports.GenAiTuningServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + GenAiTuningServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(GenAiTuningServiceClient), +) +@mock.patch.object( + GenAiTuningServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(GenAiTuningServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_gen_ai_tuning_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [GenAiTuningServiceClient, GenAiTuningServiceAsyncClient] +) +@mock.patch.object( + GenAiTuningServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenAiTuningServiceClient), +) +@mock.patch.object( + GenAiTuningServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(GenAiTuningServiceAsyncClient), +) +def test_gen_ai_tuning_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", [GenAiTuningServiceClient, GenAiTuningServiceAsyncClient] +) +@mock.patch.object( + GenAiTuningServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(GenAiTuningServiceClient), +) +@mock.patch.object( + GenAiTuningServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(GenAiTuningServiceAsyncClient), +) +def test_gen_ai_tuning_service_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = GenAiTuningServiceClient._DEFAULT_UNIVERSE + default_endpoint = GenAiTuningServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = GenAiTuningServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. + options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (GenAiTuningServiceClient, transports.GenAiTuningServiceGrpcTransport, "grpc"), + ( + GenAiTuningServiceAsyncClient, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (GenAiTuningServiceClient, transports.GenAiTuningServiceRestTransport, "rest"), + ], +) +def test_gen_ai_tuning_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + GenAiTuningServiceClient, + transports.GenAiTuningServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + GenAiTuningServiceAsyncClient, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + GenAiTuningServiceClient, + transports.GenAiTuningServiceRestTransport, + "rest", + None, + ), + ], +) +def 
test_gen_ai_tuning_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_gen_ai_tuning_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1.services.gen_ai_tuning_service.transports.GenAiTuningServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = GenAiTuningServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + GenAiTuningServiceClient, + transports.GenAiTuningServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + GenAiTuningServiceAsyncClient, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_gen_ai_tuning_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + genai_tuning_service.CreateTuningJobRequest, + dict, + ], +) +def test_create_tuning_job(request_type, transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and 
we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tuning_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tuning_job.TuningJob( + name="name_value", + tuned_model_display_name="tuned_model_display_name_value", + description="description_value", + state=job_state.JobState.JOB_STATE_QUEUED, + experiment="experiment_value", + base_model="base_model_value", + ) + response = client.create_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = genai_tuning_service.CreateTuningJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_tuning_job.TuningJob) + assert response.name == "name_value" + assert response.tuned_model_display_name == "tuned_model_display_name_value" + assert response.description == "description_value" + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.experiment == "experiment_value" + + +def test_create_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tuning_job), "__call__" + ) as call: + client.create_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == genai_tuning_service.CreateTuningJobRequest() + + +def test_create_tuning_job_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = genai_tuning_service.CreateTuningJobRequest( + parent="parent_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tuning_job), "__call__" + ) as call: + client.create_tuning_job(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == genai_tuning_service.CreateTuningJobRequest( + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_create_tuning_job_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tuning_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tuning_job.TuningJob( + name="name_value", + tuned_model_display_name="tuned_model_display_name_value", + description="description_value", + state=job_state.JobState.JOB_STATE_QUEUED, + experiment="experiment_value", + ) + ) + response = await client.create_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == genai_tuning_service.CreateTuningJobRequest() + + +@pytest.mark.asyncio +async def test_create_tuning_job_async( + transport: str = "grpc_asyncio", + request_type=genai_tuning_service.CreateTuningJobRequest, +): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tuning_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tuning_job.TuningJob( + name="name_value", + tuned_model_display_name="tuned_model_display_name_value", + description="description_value", + state=job_state.JobState.JOB_STATE_QUEUED, + experiment="experiment_value", + ) + ) + response = await client.create_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = genai_tuning_service.CreateTuningJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tuning_job.TuningJob) + assert response.name == "name_value" + assert response.tuned_model_display_name == "tuned_model_display_name_value" + assert response.description == "description_value" + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.experiment == "experiment_value" + + +@pytest.mark.asyncio +async def test_create_tuning_job_async_from_dict(): + await test_create_tuning_job_async(request_type=dict) + + +def test_create_tuning_job_field_headers(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = genai_tuning_service.CreateTuningJobRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tuning_job), "__call__" + ) as call: + call.return_value = gca_tuning_job.TuningJob() + client.create_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_tuning_job_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = genai_tuning_service.CreateTuningJobRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.create_tuning_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tuning_job.TuningJob() + ) + await client.create_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_tuning_job_flattened(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tuning_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tuning_job.TuningJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_tuning_job( + parent="parent_value", + tuning_job=gca_tuning_job.TuningJob(base_model="base_model_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].tuning_job + mock_val = gca_tuning_job.TuningJob(base_model="base_model_value") + assert arg == mock_val + + +def test_create_tuning_job_flattened_error(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_tuning_job( + genai_tuning_service.CreateTuningJobRequest(), + parent="parent_value", + tuning_job=gca_tuning_job.TuningJob(base_model="base_model_value"), + ) + + +@pytest.mark.asyncio +async def test_create_tuning_job_flattened_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_tuning_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gca_tuning_job.TuningJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_tuning_job.TuningJob() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_tuning_job( + parent="parent_value", + tuning_job=gca_tuning_job.TuningJob(base_model="base_model_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].tuning_job + mock_val = gca_tuning_job.TuningJob(base_model="base_model_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_tuning_job_flattened_error_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_tuning_job( + genai_tuning_service.CreateTuningJobRequest(), + parent="parent_value", + tuning_job=gca_tuning_job.TuningJob(base_model="base_model_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + genai_tuning_service.GetTuningJobRequest, + dict, + ], +) +def test_get_tuning_job(request_type, transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuning_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = tuning_job.TuningJob( + name="name_value", + tuned_model_display_name="tuned_model_display_name_value", + description="description_value", + state=job_state.JobState.JOB_STATE_QUEUED, + experiment="experiment_value", + base_model="base_model_value", + ) + response = client.get_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = genai_tuning_service.GetTuningJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, tuning_job.TuningJob) + assert response.name == "name_value" + assert response.tuned_model_display_name == "tuned_model_display_name_value" + assert response.description == "description_value" + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.experiment == "experiment_value" + + +def test_get_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuning_job), "__call__") as call: + client.get_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == genai_tuning_service.GetTuningJobRequest() + + +def test_get_tuning_job_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = genai_tuning_service.GetTuningJobRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuning_job), "__call__") as call: + client.get_tuning_job(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == genai_tuning_service.GetTuningJobRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_tuning_job_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuning_job), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tuning_job.TuningJob( + name="name_value", + tuned_model_display_name="tuned_model_display_name_value", + description="description_value", + state=job_state.JobState.JOB_STATE_QUEUED, + experiment="experiment_value", + ) + ) + response = await client.get_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == genai_tuning_service.GetTuningJobRequest() + + +@pytest.mark.asyncio +async def test_get_tuning_job_async( + transport: str = "grpc_asyncio", + request_type=genai_tuning_service.GetTuningJobRequest, +): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuning_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tuning_job.TuningJob( + name="name_value", + tuned_model_display_name="tuned_model_display_name_value", + description="description_value", + state=job_state.JobState.JOB_STATE_QUEUED, + experiment="experiment_value", + ) + ) + response = await client.get_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = genai_tuning_service.GetTuningJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, tuning_job.TuningJob) + assert response.name == "name_value" + assert response.tuned_model_display_name == "tuned_model_display_name_value" + assert response.description == "description_value" + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.experiment == "experiment_value" + + +@pytest.mark.asyncio +async def test_get_tuning_job_async_from_dict(): + await test_get_tuning_job_async(request_type=dict) + + +def test_get_tuning_job_field_headers(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = genai_tuning_service.GetTuningJobRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuning_job), "__call__") as call: + call.return_value = tuning_job.TuningJob() + client.get_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_tuning_job_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = genai_tuning_service.GetTuningJobRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_tuning_job), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tuning_job.TuningJob() + ) + await client.get_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_tuning_job_flattened(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_tuning_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = tuning_job.TuningJob() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_tuning_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_tuning_job_flattened_error(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_tuning_job( + genai_tuning_service.GetTuningJobRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_tuning_job_flattened_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_tuning_job), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = tuning_job.TuningJob() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + tuning_job.TuningJob() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_tuning_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_tuning_job_flattened_error_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_tuning_job( + genai_tuning_service.GetTuningJobRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + genai_tuning_service.ListTuningJobsRequest, + dict, + ], +) +def test_list_tuning_jobs(request_type, transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tuning_jobs), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = genai_tuning_service.ListTuningJobsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_tuning_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = genai_tuning_service.ListTuningJobsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTuningJobsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_tuning_jobs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tuning_jobs), "__call__") as call: + client.list_tuning_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == genai_tuning_service.ListTuningJobsRequest() + + +def test_list_tuning_jobs_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = genai_tuning_service.ListTuningJobsRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_tuning_jobs), "__call__") as call: + client.list_tuning_jobs(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == genai_tuning_service.ListTuningJobsRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + ) + + +@pytest.mark.asyncio +async def test_list_tuning_jobs_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tuning_jobs), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + genai_tuning_service.ListTuningJobsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_tuning_jobs() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == genai_tuning_service.ListTuningJobsRequest() + + +@pytest.mark.asyncio +async def test_list_tuning_jobs_async( + transport: str = "grpc_asyncio", + request_type=genai_tuning_service.ListTuningJobsRequest, +): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tuning_jobs), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + genai_tuning_service.ListTuningJobsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_tuning_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = genai_tuning_service.ListTuningJobsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTuningJobsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_tuning_jobs_async_from_dict(): + await test_list_tuning_jobs_async(request_type=dict) + + +def test_list_tuning_jobs_field_headers(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = genai_tuning_service.ListTuningJobsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tuning_jobs), "__call__") as call: + call.return_value = genai_tuning_service.ListTuningJobsResponse() + client.list_tuning_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_tuning_jobs_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = genai_tuning_service.ListTuningJobsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tuning_jobs), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + genai_tuning_service.ListTuningJobsResponse() + ) + await client.list_tuning_jobs(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_tuning_jobs_flattened(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tuning_jobs), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = genai_tuning_service.ListTuningJobsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_tuning_jobs( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_tuning_jobs_flattened_error(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_tuning_jobs( + genai_tuning_service.ListTuningJobsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_tuning_jobs_flattened_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tuning_jobs), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = genai_tuning_service.ListTuningJobsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + genai_tuning_service.ListTuningJobsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_tuning_jobs( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_tuning_jobs_flattened_error_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_tuning_jobs( + genai_tuning_service.ListTuningJobsRequest(), + parent="parent_value", + ) + + +def test_list_tuning_jobs_pager(transport_name: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tuning_jobs), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + tuning_job.TuningJob(), + tuning_job.TuningJob(), + ], + next_page_token="abc", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[], + next_page_token="def", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + ], + next_page_token="ghi", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + tuning_job.TuningJob(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_tuning_jobs(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, tuning_job.TuningJob) for i in results) + + +def test_list_tuning_jobs_pages(transport_name: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_tuning_jobs), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + tuning_job.TuningJob(), + tuning_job.TuningJob(), + ], + next_page_token="abc", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[], + next_page_token="def", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + ], + next_page_token="ghi", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + tuning_job.TuningJob(), + ], + ), + RuntimeError, + ) + pages = list(client.list_tuning_jobs(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_tuning_jobs_async_pager(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tuning_jobs), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + tuning_job.TuningJob(), + tuning_job.TuningJob(), + ], + next_page_token="abc", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[], + next_page_token="def", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + ], + next_page_token="ghi", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + tuning_job.TuningJob(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_tuning_jobs( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, tuning_job.TuningJob) for i in responses) + + +@pytest.mark.asyncio +async def test_list_tuning_jobs_async_pages(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_tuning_jobs), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + tuning_job.TuningJob(), + tuning_job.TuningJob(), + ], + next_page_token="abc", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[], + next_page_token="def", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + ], + next_page_token="ghi", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + tuning_job.TuningJob(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_tuning_jobs(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + genai_tuning_service.CancelTuningJobRequest, + dict, + ], +) +def test_cancel_tuning_job(request_type, transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_tuning_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = genai_tuning_service.CancelTuningJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_tuning_job_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_tuning_job), "__call__" + ) as call: + client.cancel_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == genai_tuning_service.CancelTuningJobRequest() + + +def test_cancel_tuning_job_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = genai_tuning_service.CancelTuningJobRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_tuning_job), "__call__" + ) as call: + client.cancel_tuning_job(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == genai_tuning_service.CancelTuningJobRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_cancel_tuning_job_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_tuning_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_tuning_job() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == genai_tuning_service.CancelTuningJobRequest() + + +@pytest.mark.asyncio +async def test_cancel_tuning_job_async( + transport: str = "grpc_asyncio", + request_type=genai_tuning_service.CancelTuningJobRequest, +): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_tuning_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = genai_tuning_service.CancelTuningJobRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert response is None + + +@pytest.mark.asyncio +async def test_cancel_tuning_job_async_from_dict(): + await test_cancel_tuning_job_async(request_type=dict) + + +def test_cancel_tuning_job_field_headers(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = genai_tuning_service.CancelTuningJobRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_tuning_job), "__call__" + ) as call: + call.return_value = None + client.cancel_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_tuning_job_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = genai_tuning_service.CancelTuningJobRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_tuning_job), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_tuning_job(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_cancel_tuning_job_flattened(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_tuning_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_tuning_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_cancel_tuning_job_flattened_error(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_tuning_job( + genai_tuning_service.CancelTuningJobRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_cancel_tuning_job_flattened_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_tuning_job), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = None + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.cancel_tuning_job( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_cancel_tuning_job_flattened_error_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.cancel_tuning_job( + genai_tuning_service.CancelTuningJobRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + genai_tuning_service.CreateTuningJobRequest, + dict, + ], +) +def test_create_tuning_job_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["tuning_job"] = { + "base_model": "base_model_value", + "supervised_tuning_spec": { + "training_dataset_uri": "training_dataset_uri_value", + "validation_dataset_uri": "validation_dataset_uri_value", + "hyper_parameters": { + "epoch_count": 1175, + "learning_rate_multiplier": 0.2561, + "adapter_size": 1, + }, + }, + "name": "name_value", + "tuned_model_display_name": "tuned_model_display_name_value", + "description": "description_value", + "state": 1, + "create_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "end_time": {}, + "update_time": {}, + "error": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "labels": {}, + "experiment": "experiment_value", + "tuned_model": {"model": "model_value", "endpoint": 
"endpoint_value"}, + "tuning_data_stats": { + "supervised_tuning_data_stats": { + "tuning_dataset_example_count": 2989, + "total_tuning_character_count": 2988, + "total_billable_character_count": 3150, + "tuning_step_count": 1848, + "user_input_token_distribution": { + "sum": 341, + "min_": 0.419, + "max_": 0.421, + "mean": 0.417, + "median": 0.622, + "p5": 0.165, + "p95": 0.222, + "buckets": [{"count": 0.553, "left": 0.427, "right": 0.542}], + }, + "user_output_token_distribution": {}, + "user_message_per_example_distribution": {}, + "user_dataset_examples": [ + { + "role": "role_value", + "parts": [ + { + "text": "text_value", + "inline_data": { + "mime_type": "mime_type_value", + "data": b"data_blob", + }, + "file_data": { + "mime_type": "mime_type_value", + "file_uri": "file_uri_value", + }, + "function_call": { + "name": "name_value", + "args": {"fields": {}}, + }, + "function_response": { + "name": "name_value", + "response": {}, + }, + "video_metadata": { + "start_offset": {"seconds": 751, "nanos": 543}, + "end_offset": {}, + }, + } + ], + } + ], + } + }, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = genai_tuning_service.CreateTuningJobRequest.meta.fields["tuning_job"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["tuning_job"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["tuning_job"][field])): + del request_init["tuning_job"][field][i][subfield] + else: + del 
request_init["tuning_job"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gca_tuning_job.TuningJob( + name="name_value", + tuned_model_display_name="tuned_model_display_name_value", + description="description_value", + state=job_state.JobState.JOB_STATE_QUEUED, + experiment="experiment_value", + base_model="base_model_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_tuning_job.TuningJob.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_tuning_job(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, gca_tuning_job.TuningJob) + assert response.name == "name_value" + assert response.tuned_model_display_name == "tuned_model_display_name_value" + assert response.description == "description_value" + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.experiment == "experiment_value" + + +def test_create_tuning_job_rest_required_fields( + request_type=genai_tuning_service.CreateTuningJobRequest, +): + transport_class = transports.GenAiTuningServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_tuning_job._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_tuning_job._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gca_tuning_job.TuningJob() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gca_tuning_job.TuningJob.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_tuning_job(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_tuning_job_rest_unset_required_fields(): + transport = transports.GenAiTuningServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_tuning_job._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "tuningJob", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_tuning_job_rest_interceptors(null_interceptor): + transport = transports.GenAiTuningServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenAiTuningServiceRestInterceptor(), + ) + client = GenAiTuningServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + 
path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenAiTuningServiceRestInterceptor, "post_create_tuning_job" + ) as post, mock.patch.object( + transports.GenAiTuningServiceRestInterceptor, "pre_create_tuning_job" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = genai_tuning_service.CreateTuningJobRequest.pb( + genai_tuning_service.CreateTuningJobRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gca_tuning_job.TuningJob.to_json( + gca_tuning_job.TuningJob() + ) + + request = genai_tuning_service.CreateTuningJobRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gca_tuning_job.TuningJob() + + client.create_tuning_job( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_tuning_job_rest_bad_request( + transport: str = "rest", request_type=genai_tuning_service.CreateTuningJobRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_tuning_job(request) + + +def test_create_tuning_job_rest_flattened(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gca_tuning_job.TuningJob() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + tuning_job=gca_tuning_job.TuningJob(base_model="base_model_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_tuning_job.TuningJob.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_tuning_job(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/tuningJobs" % client.transport._host, + args[1], + ) + + +def test_create_tuning_job_rest_flattened_error(transport: str = "rest"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_tuning_job( + genai_tuning_service.CreateTuningJobRequest(), + parent="parent_value", + tuning_job=gca_tuning_job.TuningJob(base_model="base_model_value"), + ) + + +def test_create_tuning_job_rest_error(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + genai_tuning_service.GetTuningJobRequest, + dict, + ], +) +def test_get_tuning_job_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/tuningJobs/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = tuning_job.TuningJob( + name="name_value", + tuned_model_display_name="tuned_model_display_name_value", + description="description_value", + state=job_state.JobState.JOB_STATE_QUEUED, + experiment="experiment_value", + base_model="base_model_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = tuning_job.TuningJob.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_tuning_job(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, tuning_job.TuningJob) + assert response.name == "name_value" + assert response.tuned_model_display_name == "tuned_model_display_name_value" + assert response.description == "description_value" + assert response.state == job_state.JobState.JOB_STATE_QUEUED + assert response.experiment == "experiment_value" + + +def test_get_tuning_job_rest_required_fields( + request_type=genai_tuning_service.GetTuningJobRequest, +): + transport_class = transports.GenAiTuningServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_tuning_job._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + 
).get_tuning_job._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = tuning_job.TuningJob() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = tuning_job.TuningJob.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_tuning_job(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_tuning_job_rest_unset_required_fields(): + transport = transports.GenAiTuningServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_tuning_job._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_tuning_job_rest_interceptors(null_interceptor): + transport = transports.GenAiTuningServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenAiTuningServiceRestInterceptor(), + ) + client = GenAiTuningServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.GenAiTuningServiceRestInterceptor, "post_get_tuning_job" + ) as post, mock.patch.object( + transports.GenAiTuningServiceRestInterceptor, "pre_get_tuning_job" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = genai_tuning_service.GetTuningJobRequest.pb( + genai_tuning_service.GetTuningJobRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": 
pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = tuning_job.TuningJob.to_json(tuning_job.TuningJob()) + + request = genai_tuning_service.GetTuningJobRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = tuning_job.TuningJob() + + client.get_tuning_job( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_tuning_job_rest_bad_request( + transport: str = "rest", request_type=genai_tuning_service.GetTuningJobRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/tuningJobs/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_tuning_job(request) + + +def test_get_tuning_job_rest_flattened(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = tuning_job.TuningJob() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/tuningJobs/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = tuning_job.TuningJob.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_tuning_job(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/tuningJobs/*}" % client.transport._host, + args[1], + ) + + +def test_get_tuning_job_rest_flattened_error(transport: str = "rest"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_tuning_job( + genai_tuning_service.GetTuningJobRequest(), + name="name_value", + ) + + +def test_get_tuning_job_rest_error(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + genai_tuning_service.ListTuningJobsRequest, + dict, + ], +) +def test_list_tuning_jobs_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = genai_tuning_service.ListTuningJobsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = genai_tuning_service.ListTuningJobsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_tuning_jobs(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListTuningJobsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_tuning_jobs_rest_required_fields( + request_type=genai_tuning_service.ListTuningJobsRequest, +): + transport_class = transports.GenAiTuningServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_tuning_jobs._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_tuning_jobs._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = genai_tuning_service.ListTuningJobsResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = genai_tuning_service.ListTuningJobsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_tuning_jobs(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_tuning_jobs_rest_unset_required_fields(): + transport = transports.GenAiTuningServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_tuning_jobs._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_tuning_jobs_rest_interceptors(null_interceptor): + transport = transports.GenAiTuningServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenAiTuningServiceRestInterceptor(), + ) + client = GenAiTuningServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, 
"transcode" + ) as transcode, mock.patch.object( + transports.GenAiTuningServiceRestInterceptor, "post_list_tuning_jobs" + ) as post, mock.patch.object( + transports.GenAiTuningServiceRestInterceptor, "pre_list_tuning_jobs" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = genai_tuning_service.ListTuningJobsRequest.pb( + genai_tuning_service.ListTuningJobsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = genai_tuning_service.ListTuningJobsResponse.to_json( + genai_tuning_service.ListTuningJobsResponse() + ) + + request = genai_tuning_service.ListTuningJobsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = genai_tuning_service.ListTuningJobsResponse() + + client.list_tuning_jobs( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_tuning_jobs_rest_bad_request( + transport: str = "rest", request_type=genai_tuning_service.ListTuningJobsRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_tuning_jobs(request) + + +def test_list_tuning_jobs_rest_flattened(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = genai_tuning_service.ListTuningJobsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = genai_tuning_service.ListTuningJobsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_tuning_jobs(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/tuningJobs" % client.transport._host, + args[1], + ) + + +def test_list_tuning_jobs_rest_flattened_error(transport: str = "rest"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_tuning_jobs( + genai_tuning_service.ListTuningJobsRequest(), + parent="parent_value", + ) + + +def test_list_tuning_jobs_rest_pager(transport: str = "rest"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + tuning_job.TuningJob(), + tuning_job.TuningJob(), + ], + next_page_token="abc", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[], + next_page_token="def", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + ], + next_page_token="ghi", + ), + genai_tuning_service.ListTuningJobsResponse( + tuning_jobs=[ + tuning_job.TuningJob(), + tuning_job.TuningJob(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + genai_tuning_service.ListTuningJobsResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_tuning_jobs(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, tuning_job.TuningJob) for i in results) + + pages = list(client.list_tuning_jobs(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + genai_tuning_service.CancelTuningJobRequest, + dict, + ], +) +def test_cancel_tuning_job_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/tuningJobs/sample3"} + request = request_type(**request_init) + + # Mock the 
http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.cancel_tuning_job(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_tuning_job_rest_required_fields( + request_type=genai_tuning_service.CancelTuningJobRequest, +): + transport_class = transports.GenAiTuningServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).cancel_tuning_job._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).cancel_tuning_job._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
+ return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_tuning_job(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_cancel_tuning_job_rest_unset_required_fields(): + transport = transports.GenAiTuningServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.cancel_tuning_job._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_cancel_tuning_job_rest_interceptors(null_interceptor): + transport = transports.GenAiTuningServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.GenAiTuningServiceRestInterceptor(), + ) + client = GenAiTuningServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + 
transports.GenAiTuningServiceRestInterceptor, "pre_cancel_tuning_job" + ) as pre: + pre.assert_not_called() + pb_message = genai_tuning_service.CancelTuningJobRequest.pb( + genai_tuning_service.CancelTuningJobRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = genai_tuning_service.CancelTuningJobRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.cancel_tuning_job( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + + +def test_cancel_tuning_job_rest_bad_request( + transport: str = "rest", request_type=genai_tuning_service.CancelTuningJobRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/tuningJobs/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_tuning_job(request) + + +def test_cancel_tuning_job_rest_flattened(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/tuningJobs/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "" + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.cancel_tuning_job(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/tuningJobs/*}:cancel" + % client.transport._host, + args[1], + ) + + +def test_cancel_tuning_job_rest_flattened_error(transport: str = "rest"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.cancel_tuning_job( + genai_tuning_service.CancelTuningJobRequest(), + name="name_value", + ) + + +def test_cancel_tuning_job_rest_error(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.GenAiTuningServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.GenAiTuningServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GenAiTuningServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.GenAiTuningServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = GenAiTuningServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = GenAiTuningServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.GenAiTuningServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GenAiTuningServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.GenAiTuningServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = GenAiTuningServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.GenAiTuningServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.GenAiTuningServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenAiTuningServiceGrpcTransport, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + transports.GenAiTuningServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = GenAiTuningServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.GenAiTuningServiceGrpcTransport, + ) + + +def test_gen_ai_tuning_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.GenAiTuningServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_gen_ai_tuning_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.aiplatform_v1.services.gen_ai_tuning_service.transports.GenAiTuningServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.GenAiTuningServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_tuning_job", + "get_tuning_job", + "list_tuning_jobs", + "cancel_tuning_job", + "set_iam_policy", + "get_iam_policy", + "test_iam_permissions", + "get_location", + "list_locations", + "get_operation", + "wait_operation", + "cancel_operation", + "delete_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_gen_ai_tuning_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1.services.gen_ai_tuning_service.transports.GenAiTuningServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GenAiTuningServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_gen_ai_tuning_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are 
None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1.services.gen_ai_tuning_service.transports.GenAiTuningServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GenAiTuningServiceTransport() + adc.assert_called_once() + + +def test_gen_ai_tuning_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + GenAiTuningServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenAiTuningServiceGrpcTransport, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + ], +) +def test_gen_ai_tuning_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenAiTuningServiceGrpcTransport, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + transports.GenAiTuningServiceRestTransport, + ], +) +def test_gen_ai_tuning_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.GenAiTuningServiceGrpcTransport, grpc_helpers), + (transports.GenAiTuningServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_gen_ai_tuning_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenAiTuningServiceGrpcTransport, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + ], +) +def test_gen_ai_tuning_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_gen_ai_tuning_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.GenAiTuningServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_gen_ai_tuning_service_host_no_port(transport_name): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_gen_ai_tuning_service_host_with_port(transport_name): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com:8000" + ) + 
+ +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_gen_ai_tuning_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = GenAiTuningServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = GenAiTuningServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_tuning_job._session + session2 = client2.transport.create_tuning_job._session + assert session1 != session2 + session1 = client1.transport.get_tuning_job._session + session2 = client2.transport.get_tuning_job._session + assert session1 != session2 + session1 = client1.transport.list_tuning_jobs._session + session2 = client2.transport.list_tuning_jobs._session + assert session1 != session2 + session1 = client1.transport.cancel_tuning_job._session + session2 = client2.transport.cancel_tuning_job._session + assert session1 != session2 + + +def test_gen_ai_tuning_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.GenAiTuningServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_gen_ai_tuning_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.GenAiTuningServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenAiTuningServiceGrpcTransport, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + ], +) +def test_gen_ai_tuning_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, 
client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.GenAiTuningServiceGrpcTransport, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + ], +) +def test_gen_ai_tuning_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_context_path(): + project = "squid" + location = "clam" + metadata_store = "whelk" + context = "octopus" + expected = "projects/{project}/locations/{location}/metadataStores/{metadata_store}/contexts/{context}".format( + project=project, + location=location, + metadata_store=metadata_store, + context=context, + ) + actual = GenAiTuningServiceClient.context_path( + project, location, metadata_store, context + ) + assert expected == actual + + +def test_parse_context_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "metadata_store": "cuttlefish", + "context": "mussel", + } + path = GenAiTuningServiceClient.context_path(**expected) + + # Check that 
the path construction is reversible. + actual = GenAiTuningServiceClient.parse_context_path(path) + assert expected == actual + + +def test_endpoint_path(): + project = "winkle" + location = "nautilus" + endpoint = "scallop" + expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format( + project=project, + location=location, + endpoint=endpoint, + ) + actual = GenAiTuningServiceClient.endpoint_path(project, location, endpoint) + assert expected == actual + + +def test_parse_endpoint_path(): + expected = { + "project": "abalone", + "location": "squid", + "endpoint": "clam", + } + path = GenAiTuningServiceClient.endpoint_path(**expected) + + # Check that the path construction is reversible. + actual = GenAiTuningServiceClient.parse_endpoint_path(path) + assert expected == actual + + +def test_model_path(): + project = "whelk" + location = "octopus" + model = "oyster" + expected = "projects/{project}/locations/{location}/models/{model}".format( + project=project, + location=location, + model=model, + ) + actual = GenAiTuningServiceClient.model_path(project, location, model) + assert expected == actual + + +def test_parse_model_path(): + expected = { + "project": "nudibranch", + "location": "cuttlefish", + "model": "mussel", + } + path = GenAiTuningServiceClient.model_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GenAiTuningServiceClient.parse_model_path(path) + assert expected == actual + + +def test_tuning_job_path(): + project = "winkle" + location = "nautilus" + tuning_job = "scallop" + expected = "projects/{project}/locations/{location}/tuningJobs/{tuning_job}".format( + project=project, + location=location, + tuning_job=tuning_job, + ) + actual = GenAiTuningServiceClient.tuning_job_path(project, location, tuning_job) + assert expected == actual + + +def test_parse_tuning_job_path(): + expected = { + "project": "abalone", + "location": "squid", + "tuning_job": "clam", + } + path = GenAiTuningServiceClient.tuning_job_path(**expected) + + # Check that the path construction is reversible. + actual = GenAiTuningServiceClient.parse_tuning_job_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "whelk" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = GenAiTuningServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = GenAiTuningServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = GenAiTuningServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "oyster" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = GenAiTuningServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = GenAiTuningServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GenAiTuningServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = GenAiTuningServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = GenAiTuningServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = GenAiTuningServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format( + project=project, + ) + actual = GenAiTuningServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = GenAiTuningServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = GenAiTuningServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = GenAiTuningServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = GenAiTuningServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GenAiTuningServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.GenAiTuningServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.GenAiTuningServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = GenAiTuningServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_get_location_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.GetLocationRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_location(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.GetLocationRequest, + dict, + ], +) +def test_get_location_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.Location() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_location(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_list_locations_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.ListLocationsRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({"name": "projects/sample1"}, request) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_locations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.ListLocationsRequest, + dict, + ], +) +def test_list_locations_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_locations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_get_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_set_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_test_iam_permissions_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + +def test_cancel_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.CancelOperationRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.DeleteOperationRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.DeleteOperationRequest, + dict, + ], +) +def test_delete_operation_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.ListOperationsRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_wait_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.WaitOperationRequest +): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.WaitOperationRequest, + dict, + ], +) +def test_wait_operation_rest(request_type): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.wait_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_delete_operation(transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_field_headers(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_delete_operation_from_dict(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_wait_operation(transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = operations_pb2.Operation()
+        response = client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = GenAiTuningServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+    client = GenAiTuningServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = operations_pb2.WaitOperationRequest()
+    request.name = "locations"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_wait_operation_from_dict(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_list_locations_field_headers(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_locations_from_dict(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) + + +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_get_location_field_headers(): + client = GenAiTuningServiceClient(credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations/abc",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = GenAiTuningServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations/abc",
+    ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+    client = GenAiTuningServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = GenAiTuningServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_field_headers(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_set_iam_policy_from_dict(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +def test_get_iam_policy(transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_test_iam_permissions(transport: str = "grpc"): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = GenAiTuningServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = GenAiTuningServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (GenAiTuningServiceClient, transports.GenAiTuningServiceGrpcTransport), + ( + GenAiTuningServiceAsyncClient, + transports.GenAiTuningServiceGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + 
UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/tests/unit/gapic/aiplatform_v1/test_job_service.py b/tests/unit/gapic/aiplatform_v1/test_job_service.py index 51b18a0f28..8a6ad0f417 100644 --- a/tests/unit/gapic/aiplatform_v1/test_job_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_job_service.py @@ -13521,6 +13521,7 @@ def test_create_custom_job_rest(request_type): "name": "name_value", "display_name": "display_name_value", "job_spec": { + "persistent_resource_id": "persistent_resource_id_value", "worker_pool_specs": [ { "container_spec": { @@ -16712,6 +16713,7 @@ def test_create_hyperparameter_tuning_job_rest(request_type): "parallel_trial_count": 2128, "max_failed_trial_count": 2317, "trial_job_spec": { + "persistent_resource_id": "persistent_resource_id_value", "worker_pool_specs": [ { "container_spec": { @@ -18378,6 +18380,7 @@ def test_create_nas_job_rest(request_type): "metric": {"metric_id": "metric_id_value", "goal": 1}, "search_trial_spec": { "search_trial_job_spec": { + "persistent_resource_id": "persistent_resource_id_value", "worker_pool_specs": [ { "container_spec": { @@ -26153,10 +26156,38 @@ def test_parse_notification_channel_path(): assert expected == actual -def test_tensorboard_path(): +def test_persistent_resource_path(): project = "cuttlefish" location = "mussel" - tensorboard = "winkle" + persistent_resource = "winkle" + expected = "projects/{project}/locations/{location}/persistentResources/{persistent_resource}".format( + project=project, + location=location, + persistent_resource=persistent_resource, + ) + actual = JobServiceClient.persistent_resource_path( + project, location, persistent_resource + ) + assert expected == actual + + +def test_parse_persistent_resource_path(): + expected = { + "project": "nautilus", + "location": "scallop", + 
"persistent_resource": "abalone", + } + path = JobServiceClient.persistent_resource_path(**expected) + + # Check that the path construction is reversible. + actual = JobServiceClient.parse_persistent_resource_path(path) + assert expected == actual + + +def test_tensorboard_path(): + project = "squid" + location = "clam" + tensorboard = "whelk" expected = ( "projects/{project}/locations/{location}/tensorboards/{tensorboard}".format( project=project, @@ -26170,9 +26201,9 @@ def test_tensorboard_path(): def test_parse_tensorboard_path(): expected = { - "project": "nautilus", - "location": "scallop", - "tensorboard": "abalone", + "project": "octopus", + "location": "oyster", + "tensorboard": "nudibranch", } path = JobServiceClient.tensorboard_path(**expected) @@ -26182,10 +26213,10 @@ def test_parse_tensorboard_path(): def test_trial_path(): - project = "squid" - location = "clam" - study = "whelk" - trial = "octopus" + project = "cuttlefish" + location = "mussel" + study = "winkle" + trial = "nautilus" expected = ( "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format( project=project, @@ -26200,10 +26231,10 @@ def test_trial_path(): def test_parse_trial_path(): expected = { - "project": "oyster", - "location": "nudibranch", - "study": "cuttlefish", - "trial": "mussel", + "project": "scallop", + "location": "abalone", + "study": "squid", + "trial": "clam", } path = JobServiceClient.trial_path(**expected) @@ -26213,7 +26244,7 @@ def test_parse_trial_path(): def test_common_billing_account_path(): - billing_account = "winkle" + billing_account = "whelk" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -26223,7 +26254,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "nautilus", + "billing_account": "octopus", } path = JobServiceClient.common_billing_account_path(**expected) @@ -26233,7 +26264,7 @@ def 
test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "scallop" + folder = "oyster" expected = "folders/{folder}".format( folder=folder, ) @@ -26243,7 +26274,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "abalone", + "folder": "nudibranch", } path = JobServiceClient.common_folder_path(**expected) @@ -26253,7 +26284,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "squid" + organization = "cuttlefish" expected = "organizations/{organization}".format( organization=organization, ) @@ -26263,7 +26294,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "clam", + "organization": "mussel", } path = JobServiceClient.common_organization_path(**expected) @@ -26273,7 +26304,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "whelk" + project = "winkle" expected = "projects/{project}".format( project=project, ) @@ -26283,7 +26314,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "octopus", + "project": "nautilus", } path = JobServiceClient.common_project_path(**expected) @@ -26293,8 +26324,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "oyster" - location = "nudibranch" + project = "scallop" + location = "abalone" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -26305,8 +26336,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "cuttlefish", - "location": "mussel", + "project": "squid", + "location": "clam", } path = JobServiceClient.common_location_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1/test_migration_service.py b/tests/unit/gapic/aiplatform_v1/test_migration_service.py index 7d7ddfd415..c3ccb36026 100644 --- 
a/tests/unit/gapic/aiplatform_v1/test_migration_service.py +++ b/tests/unit/gapic/aiplatform_v1/test_migration_service.py @@ -3255,22 +3255,19 @@ def test_parse_annotated_dataset_path(): def test_dataset_path(): project = "cuttlefish" - location = "mussel" - dataset = "winkle" - expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( + dataset = "mussel" + expected = "projects/{project}/datasets/{dataset}".format( project=project, - location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, location, dataset) + actual = MigrationServiceClient.dataset_path(project, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "nautilus", - "location": "scallop", - "dataset": "abalone", + "project": "winkle", + "dataset": "nautilus", } path = MigrationServiceClient.dataset_path(**expected) @@ -3280,19 +3277,22 @@ def test_parse_dataset_path(): def test_dataset_path(): - project = "squid" - dataset = "clam" - expected = "projects/{project}/datasets/{dataset}".format( + project = "scallop" + location = "abalone" + dataset = "squid" + expected = "projects/{project}/locations/{location}/datasets/{dataset}".format( project=project, + location=location, dataset=dataset, ) - actual = MigrationServiceClient.dataset_path(project, dataset) + actual = MigrationServiceClient.dataset_path(project, location, dataset) assert expected == actual def test_parse_dataset_path(): expected = { - "project": "whelk", + "project": "clam", + "location": "whelk", "dataset": "octopus", } path = MigrationServiceClient.dataset_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1/test_notebook_service.py b/tests/unit/gapic/aiplatform_v1/test_notebook_service.py new file mode 100644 index 0000000000..c6b3d5384b --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1/test_notebook_service.py @@ -0,0 +1,10794 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from google.api_core import api_core_version +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.notebook_service import ( + NotebookServiceAsyncClient, +) +from google.cloud.aiplatform_v1.services.notebook_service import NotebookServiceClient +from 
google.cloud.aiplatform_v1.services.notebook_service import pagers +from google.cloud.aiplatform_v1.services.notebook_service import transports +from google.cloud.aiplatform_v1.types import accelerator_type +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import network_spec +from google.cloud.aiplatform_v1.types import notebook_euc_config +from google.cloud.aiplatform_v1.types import notebook_idle_shutdown_config +from google.cloud.aiplatform_v1.types import notebook_runtime +from google.cloud.aiplatform_v1.types import notebook_runtime as gca_notebook_runtime +from google.cloud.aiplatform_v1.types import notebook_runtime_template_ref +from google.cloud.aiplatform_v1.types import notebook_service +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +# If default endpoint template is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert NotebookServiceClient._get_default_mtls_endpoint(None) is None + assert ( + NotebookServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + NotebookServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + NotebookServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + NotebookServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + NotebookServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +def test__read_environment_variables(): + assert NotebookServiceClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert NotebookServiceClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert NotebookServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + NotebookServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable 
`GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert NotebookServiceClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert NotebookServiceClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert NotebookServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + NotebookServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert NotebookServiceClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert NotebookServiceClient._get_client_cert_source(None, False) is None + assert ( + NotebookServiceClient._get_client_cert_source(mock_provided_cert_source, False) + is None + ) + assert ( + NotebookServiceClient._get_client_cert_source(mock_provided_cert_source, True) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + NotebookServiceClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + NotebookServiceClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + 
is mock_provided_cert_source + ) + + +@mock.patch.object( + NotebookServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceClient), +) +@mock.patch.object( + NotebookServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = NotebookServiceClient._DEFAULT_UNIVERSE + default_endpoint = NotebookServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = NotebookServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + NotebookServiceClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + NotebookServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == NotebookServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + NotebookServiceClient._get_api_endpoint(None, None, default_universe, "auto") + == default_endpoint + ) + assert ( + NotebookServiceClient._get_api_endpoint(None, None, default_universe, "always") + == NotebookServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + NotebookServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == NotebookServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + NotebookServiceClient._get_api_endpoint(None, None, mock_universe, "never") + == mock_endpoint + ) + assert ( + NotebookServiceClient._get_api_endpoint(None, None, default_universe, "never") + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + NotebookServiceClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." 
+ ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + NotebookServiceClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + NotebookServiceClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + NotebookServiceClient._get_universe_domain(None, None) + == NotebookServiceClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + NotebookServiceClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (NotebookServiceClient, transports.NotebookServiceGrpcTransport, "grpc"), + (NotebookServiceClient, transports.NotebookServiceRestTransport, "rest"), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. 
+ channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. + google_auth_major, google_auth_minor = [ + int(part) for part in google.auth.__version__.split(".")[0:2] + ] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. + client = client_class(transport=transport_class(credentials=credentials)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. 
+ api_core_major, api_core_minor = [ + int(part) for part in api_core_version.__version__.split(".")[0:2] + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (NotebookServiceClient, "grpc"), + (NotebookServiceAsyncClient, "grpc_asyncio"), + (NotebookServiceClient, "rest"), + ], +) +def test_notebook_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.NotebookServiceGrpcTransport, "grpc"), + (transports.NotebookServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.NotebookServiceRestTransport, "rest"), + ], +) +def 
test_notebook_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (NotebookServiceClient, "grpc"), + (NotebookServiceAsyncClient, "grpc_asyncio"), + (NotebookServiceClient, "rest"), + ], +) +def test_notebook_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +def test_notebook_service_client_get_transport_class(): + transport = NotebookServiceClient.get_transport_class() + available_transports = [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceRestTransport, + ] + assert transport in available_transports + + transport = 
NotebookServiceClient.get_transport_class("grpc") + assert transport == transports.NotebookServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (NotebookServiceClient, transports.NotebookServiceGrpcTransport, "grpc"), + ( + NotebookServiceAsyncClient, + transports.NotebookServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (NotebookServiceClient, transports.NotebookServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + NotebookServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceClient), +) +@mock.patch.object( + NotebookServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceAsyncClient), +) +def test_notebook_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(NotebookServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(NotebookServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + 
always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + NotebookServiceClient, + transports.NotebookServiceGrpcTransport, + "grpc", + "true", + ), + ( + NotebookServiceAsyncClient, + transports.NotebookServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + NotebookServiceClient, + transports.NotebookServiceGrpcTransport, + "grpc", + "false", + ), + ( + NotebookServiceAsyncClient, + transports.NotebookServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + NotebookServiceClient, + transports.NotebookServiceRestTransport, + "rest", + "true", + ), + ( + NotebookServiceClient, + transports.NotebookServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + NotebookServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceClient), +) +@mock.patch.object( + NotebookServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_notebook_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [NotebookServiceClient, NotebookServiceAsyncClient] +) +@mock.patch.object( + NotebookServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NotebookServiceClient), +) +@mock.patch.object( + NotebookServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NotebookServiceAsyncClient), +) +def test_notebook_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", [NotebookServiceClient, NotebookServiceAsyncClient] +) +@mock.patch.object( + NotebookServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceClient), +) +@mock.patch.object( + NotebookServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceAsyncClient), +) +def test_notebook_service_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = NotebookServiceClient._DEFAULT_UNIVERSE + default_endpoint = NotebookServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = NotebookServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. + options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (NotebookServiceClient, transports.NotebookServiceGrpcTransport, "grpc"), + ( + NotebookServiceAsyncClient, + transports.NotebookServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (NotebookServiceClient, transports.NotebookServiceRestTransport, "rest"), + ], +) +def test_notebook_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + NotebookServiceClient, + transports.NotebookServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + NotebookServiceAsyncClient, + transports.NotebookServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + (NotebookServiceClient, transports.NotebookServiceRestTransport, "rest", None), + ], +) +def test_notebook_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, 
grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_notebook_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1.services.notebook_service.transports.NotebookServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = NotebookServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + NotebookServiceClient, + transports.NotebookServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + NotebookServiceAsyncClient, + transports.NotebookServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_notebook_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.CreateNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_create_notebook_runtime_template(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime 
is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.CreateNotebookRuntimeTemplateRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_notebook_runtime_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + client.create_notebook_runtime_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.CreateNotebookRuntimeTemplateRequest() + + +def test_create_notebook_runtime_template_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.CreateNotebookRuntimeTemplateRequest( + parent="parent_value", + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + client.create_notebook_runtime_template(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.CreateNotebookRuntimeTemplateRequest( + parent="parent_value", + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_notebook_runtime_template_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_notebook_runtime_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.CreateNotebookRuntimeTemplateRequest() + + +@pytest.mark.asyncio +async def test_create_notebook_runtime_template_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.CreateNotebookRuntimeTemplateRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.CreateNotebookRuntimeTemplateRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_notebook_runtime_template_async_from_dict(): + await test_create_notebook_runtime_template_async(request_type=dict) + + +def test_create_notebook_runtime_template_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = notebook_service.CreateNotebookRuntimeTemplateRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_notebook_runtime_template_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.CreateNotebookRuntimeTemplateRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_notebook_runtime_template_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_notebook_runtime_template( + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].notebook_runtime_template + mock_val = notebook_runtime.NotebookRuntimeTemplate(name="name_value") + assert arg == mock_val + arg = args[0].notebook_runtime_template_id + mock_val = "notebook_runtime_template_id_value" + assert arg == mock_val + + +def test_create_notebook_runtime_template_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_notebook_runtime_template( + notebook_service.CreateNotebookRuntimeTemplateRequest(), + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_notebook_runtime_template_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_notebook_runtime_template( + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].notebook_runtime_template + mock_val = notebook_runtime.NotebookRuntimeTemplate(name="name_value") + assert arg == mock_val + arg = args[0].notebook_runtime_template_id + mock_val = "notebook_runtime_template_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_notebook_runtime_template_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_notebook_runtime_template( + notebook_service.CreateNotebookRuntimeTemplateRequest(), + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.GetNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_get_notebook_runtime_template(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = notebook_runtime.NotebookRuntimeTemplate( + name="name_value", + display_name="display_name_value", + description="description_value", + is_default=True, + service_account="service_account_value", + etag="etag_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + response = client.get_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.GetNotebookRuntimeTemplateRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, notebook_runtime.NotebookRuntimeTemplate) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.is_default is True + assert response.service_account == "service_account_value" + assert response.etag == "etag_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +def test_get_notebook_runtime_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + client.get_notebook_runtime_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookRuntimeTemplateRequest() + + +def test_get_notebook_runtime_template_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.GetNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + client.get_notebook_runtime_template(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookRuntimeTemplateRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_template_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntimeTemplate( + name="name_value", + display_name="display_name_value", + description="description_value", + is_default=True, + service_account="service_account_value", + etag="etag_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + ) + response = await client.get_notebook_runtime_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookRuntimeTemplateRequest() + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_template_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.GetNotebookRuntimeTemplateRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntimeTemplate( + name="name_value", + display_name="display_name_value", + description="description_value", + is_default=True, + service_account="service_account_value", + etag="etag_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + ) + response = await client.get_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.GetNotebookRuntimeTemplateRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, notebook_runtime.NotebookRuntimeTemplate) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.is_default is True + assert response.service_account == "service_account_value" + assert response.etag == "etag_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_template_async_from_dict(): + await test_get_notebook_runtime_template_async(request_type=dict) + + +def test_get_notebook_runtime_template_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.GetNotebookRuntimeTemplateRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + call.return_value = notebook_runtime.NotebookRuntimeTemplate() + client.get_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_template_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.GetNotebookRuntimeTemplateRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntimeTemplate() + ) + await client.get_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_notebook_runtime_template_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_runtime.NotebookRuntimeTemplate() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_notebook_runtime_template( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_notebook_runtime_template_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_notebook_runtime_template( + notebook_service.GetNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_template_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_runtime.NotebookRuntimeTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntimeTemplate() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_notebook_runtime_template( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_template_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_notebook_runtime_template( + notebook_service.GetNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.ListNotebookRuntimeTemplatesRequest, + dict, + ], +) +def test_list_notebook_runtime_templates(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookRuntimeTemplatesResponse( + next_page_token="next_page_token_value", + ) + response = client.list_notebook_runtime_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.ListNotebookRuntimeTemplatesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotebookRuntimeTemplatesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_notebook_runtime_templates_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + client.list_notebook_runtime_templates() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookRuntimeTemplatesRequest() + + +def test_list_notebook_runtime_templates_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.ListNotebookRuntimeTemplatesRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + order_by="order_by_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + client.list_notebook_runtime_templates(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookRuntimeTemplatesRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + order_by="order_by_value", + ) + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimeTemplatesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_notebook_runtime_templates() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookRuntimeTemplatesRequest() + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.ListNotebookRuntimeTemplatesRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimeTemplatesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_notebook_runtime_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.ListNotebookRuntimeTemplatesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListNotebookRuntimeTemplatesAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_async_from_dict(): + await test_list_notebook_runtime_templates_async(request_type=dict) + + +def test_list_notebook_runtime_templates_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.ListNotebookRuntimeTemplatesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + call.return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + client.list_notebook_runtime_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.ListNotebookRuntimeTemplatesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimeTemplatesResponse() + ) + await client.list_notebook_runtime_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_notebook_runtime_templates_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_notebook_runtime_templates( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_notebook_runtime_templates_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_notebook_runtime_templates( + notebook_service.ListNotebookRuntimeTemplatesRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimeTemplatesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_notebook_runtime_templates( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_notebook_runtime_templates( + notebook_service.ListNotebookRuntimeTemplatesRequest(), + parent="parent_value", + ) + + +def test_list_notebook_runtime_templates_pager(transport_name: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_notebook_runtime_templates(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, notebook_runtime.NotebookRuntimeTemplate) for i in results + ) + + +def test_list_notebook_runtime_templates_pages(transport_name: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + ), + RuntimeError, + ) + pages = list(client.list_notebook_runtime_templates(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_async_pager(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_notebook_runtime_templates( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, notebook_runtime.NotebookRuntimeTemplate) for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_async_pages(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_notebook_runtime_templates(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.DeleteNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_delete_notebook_runtime_template(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.DeleteNotebookRuntimeTemplateRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_notebook_runtime_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + client.delete_notebook_runtime_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookRuntimeTemplateRequest() + + +def test_delete_notebook_runtime_template_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.DeleteNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + client.delete_notebook_runtime_template(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookRuntimeTemplateRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_template_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_notebook_runtime_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookRuntimeTemplateRequest() + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_template_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.DeleteNotebookRuntimeTemplateRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.DeleteNotebookRuntimeTemplateRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_template_async_from_dict(): + await test_delete_notebook_runtime_template_async(request_type=dict) + + +def test_delete_notebook_runtime_template_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.DeleteNotebookRuntimeTemplateRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_template_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = notebook_service.DeleteNotebookRuntimeTemplateRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_notebook_runtime_template_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_notebook_runtime_template( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_notebook_runtime_template_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_notebook_runtime_template( + notebook_service.DeleteNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_template_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_notebook_runtime_template( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_template_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_notebook_runtime_template( + notebook_service.DeleteNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.AssignNotebookRuntimeRequest, + dict, + ], +) +def test_assign_notebook_runtime(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.assign_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.AssignNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_assign_notebook_runtime_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + client.assign_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.AssignNotebookRuntimeRequest() + + +def test_assign_notebook_runtime_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.AssignNotebookRuntimeRequest( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime_id="notebook_runtime_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + client.assign_notebook_runtime(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.AssignNotebookRuntimeRequest( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime_id="notebook_runtime_id_value", + ) + + +@pytest.mark.asyncio +async def test_assign_notebook_runtime_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.assign_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.AssignNotebookRuntimeRequest() + + +@pytest.mark.asyncio +async def test_assign_notebook_runtime_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.AssignNotebookRuntimeRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.assign_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.AssignNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_assign_notebook_runtime_async_from_dict(): + await test_assign_notebook_runtime_async(request_type=dict) + + +def test_assign_notebook_runtime_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.AssignNotebookRuntimeRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.assign_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_assign_notebook_runtime_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.AssignNotebookRuntimeRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.assign_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_assign_notebook_runtime_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.assign_notebook_runtime( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].notebook_runtime_template + mock_val = "notebook_runtime_template_value" + assert arg == mock_val + arg = args[0].notebook_runtime + mock_val = gca_notebook_runtime.NotebookRuntime(name="name_value") + assert arg == mock_val + arg = args[0].notebook_runtime_id + mock_val = "notebook_runtime_id_value" + assert arg == mock_val + + +def test_assign_notebook_runtime_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.assign_notebook_runtime( + notebook_service.AssignNotebookRuntimeRequest(), + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", + ) + + +@pytest.mark.asyncio +async def test_assign_notebook_runtime_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.assign_notebook_runtime( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].notebook_runtime_template + mock_val = "notebook_runtime_template_value" + assert arg == mock_val + arg = args[0].notebook_runtime + mock_val = gca_notebook_runtime.NotebookRuntime(name="name_value") + assert arg == mock_val + arg = args[0].notebook_runtime_id + mock_val = "notebook_runtime_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_assign_notebook_runtime_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.assign_notebook_runtime( + notebook_service.AssignNotebookRuntimeRequest(), + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.GetNotebookRuntimeRequest, + dict, + ], +) +def test_get_notebook_runtime(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = notebook_runtime.NotebookRuntime( + name="name_value", + runtime_user="runtime_user_value", + proxy_uri="proxy_uri_value", + health_state=notebook_runtime.NotebookRuntime.HealthState.HEALTHY, + display_name="display_name_value", + description="description_value", + service_account="service_account_value", + runtime_state=notebook_runtime.NotebookRuntime.RuntimeState.RUNNING, + is_upgradable=True, + version="version_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + response = client.get_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.GetNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, notebook_runtime.NotebookRuntime) + assert response.name == "name_value" + assert response.runtime_user == "runtime_user_value" + assert response.proxy_uri == "proxy_uri_value" + assert response.health_state == notebook_runtime.NotebookRuntime.HealthState.HEALTHY + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.service_account == "service_account_value" + assert ( + response.runtime_state == notebook_runtime.NotebookRuntime.RuntimeState.RUNNING + ) + assert response.is_upgradable is True + assert response.version == "version_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +def test_get_notebook_runtime_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + client.get_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookRuntimeRequest() + + +def test_get_notebook_runtime_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.GetNotebookRuntimeRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + client.get_notebook_runtime(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookRuntimeRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntime( + name="name_value", + runtime_user="runtime_user_value", + proxy_uri="proxy_uri_value", + health_state=notebook_runtime.NotebookRuntime.HealthState.HEALTHY, + display_name="display_name_value", + description="description_value", + service_account="service_account_value", + runtime_state=notebook_runtime.NotebookRuntime.RuntimeState.RUNNING, + is_upgradable=True, + version="version_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + ) + response = await client.get_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookRuntimeRequest() + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.GetNotebookRuntimeRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntime( + name="name_value", + runtime_user="runtime_user_value", + proxy_uri="proxy_uri_value", + health_state=notebook_runtime.NotebookRuntime.HealthState.HEALTHY, + display_name="display_name_value", + description="description_value", + service_account="service_account_value", + runtime_state=notebook_runtime.NotebookRuntime.RuntimeState.RUNNING, + is_upgradable=True, + version="version_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + ) + response = await client.get_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.GetNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, notebook_runtime.NotebookRuntime) + assert response.name == "name_value" + assert response.runtime_user == "runtime_user_value" + assert response.proxy_uri == "proxy_uri_value" + assert response.health_state == notebook_runtime.NotebookRuntime.HealthState.HEALTHY + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.service_account == "service_account_value" + assert ( + response.runtime_state == notebook_runtime.NotebookRuntime.RuntimeState.RUNNING + ) + assert response.is_upgradable is True + assert response.version == "version_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_async_from_dict(): + await test_get_notebook_runtime_async(request_type=dict) + + +def test_get_notebook_runtime_field_headers(): + client = NotebookServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.GetNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + call.return_value = notebook_runtime.NotebookRuntime() + client.get_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.GetNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntime() + ) + await client.get_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_notebook_runtime_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_runtime.NotebookRuntime() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_notebook_runtime_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_notebook_runtime( + notebook_service.GetNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_runtime.NotebookRuntime() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntime() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.get_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_notebook_runtime( + notebook_service.GetNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.ListNotebookRuntimesRequest, + dict, + ], +) +def test_list_notebook_runtimes(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookRuntimesResponse( + next_page_token="next_page_token_value", + ) + response = client.list_notebook_runtimes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.ListNotebookRuntimesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListNotebookRuntimesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_notebook_runtimes_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + client.list_notebook_runtimes() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookRuntimesRequest() + + +def test_list_notebook_runtimes_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.ListNotebookRuntimesRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + order_by="order_by_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + client.list_notebook_runtimes(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookRuntimesRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + order_by="order_by_value", + ) + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_notebook_runtimes() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookRuntimesRequest() + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.ListNotebookRuntimesRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_notebook_runtimes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.ListNotebookRuntimesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotebookRuntimesAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_async_from_dict(): + await test_list_notebook_runtimes_async(request_type=dict) + + +def test_list_notebook_runtimes_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.ListNotebookRuntimesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + call.return_value = notebook_service.ListNotebookRuntimesResponse() + client.list_notebook_runtimes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.ListNotebookRuntimesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimesResponse() + ) + await client.list_notebook_runtimes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_notebook_runtimes_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookRuntimesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_notebook_runtimes( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_notebook_runtimes_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_notebook_runtimes( + notebook_service.ListNotebookRuntimesRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookRuntimesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_notebook_runtimes( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_notebook_runtimes( + notebook_service.ListNotebookRuntimesRequest(), + parent="parent_value", + ) + + +def test_list_notebook_runtimes_pager(transport_name: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_notebook_runtimes(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, notebook_runtime.NotebookRuntime) for i in results) + + +def test_list_notebook_runtimes_pages(transport_name: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + ), + RuntimeError, + ) + pages = list(client.list_notebook_runtimes(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_async_pager(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_notebook_runtimes( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, notebook_runtime.NotebookRuntime) for i in responses) + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_async_pages(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_notebook_runtimes(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.DeleteNotebookRuntimeRequest, + dict, + ], +) +def test_delete_notebook_runtime(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.DeleteNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_notebook_runtime_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + client.delete_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookRuntimeRequest() + + +def test_delete_notebook_runtime_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.DeleteNotebookRuntimeRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + client.delete_notebook_runtime(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookRuntimeRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookRuntimeRequest() + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.DeleteNotebookRuntimeRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.DeleteNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_async_from_dict(): + await test_delete_notebook_runtime_async(request_type=dict) + + +def test_delete_notebook_runtime_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.DeleteNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = notebook_service.DeleteNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_notebook_runtime_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_notebook_runtime_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_notebook_runtime( + notebook_service.DeleteNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_notebook_runtime( + notebook_service.DeleteNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.UpgradeNotebookRuntimeRequest, + dict, + ], +) +def test_upgrade_notebook_runtime(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.upgrade_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.UpgradeNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_upgrade_notebook_runtime_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + client.upgrade_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.UpgradeNotebookRuntimeRequest() + + +def test_upgrade_notebook_runtime_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.UpgradeNotebookRuntimeRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + client.upgrade_notebook_runtime(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.UpgradeNotebookRuntimeRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_upgrade_notebook_runtime_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.upgrade_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.UpgradeNotebookRuntimeRequest() + + +@pytest.mark.asyncio +async def test_upgrade_notebook_runtime_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.UpgradeNotebookRuntimeRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.upgrade_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.UpgradeNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_upgrade_notebook_runtime_async_from_dict(): + await test_upgrade_notebook_runtime_async(request_type=dict) + + +def test_upgrade_notebook_runtime_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = notebook_service.UpgradeNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.upgrade_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_upgrade_notebook_runtime_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.UpgradeNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.upgrade_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_upgrade_notebook_runtime_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.upgrade_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_upgrade_notebook_runtime_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.upgrade_notebook_runtime( + notebook_service.UpgradeNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_upgrade_notebook_runtime_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.upgrade_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_upgrade_notebook_runtime_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.upgrade_notebook_runtime( + notebook_service.UpgradeNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.StartNotebookRuntimeRequest, + dict, + ], +) +def test_start_notebook_runtime(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.start_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.StartNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_start_notebook_runtime_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + client.start_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.StartNotebookRuntimeRequest() + + +def test_start_notebook_runtime_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.StartNotebookRuntimeRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + client.start_notebook_runtime(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.StartNotebookRuntimeRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_start_notebook_runtime_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.start_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.StartNotebookRuntimeRequest() + + +@pytest.mark.asyncio +async def test_start_notebook_runtime_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.StartNotebookRuntimeRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.start_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.StartNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_start_notebook_runtime_async_from_dict(): + await test_start_notebook_runtime_async(request_type=dict) + + +def test_start_notebook_runtime_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = notebook_service.StartNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.start_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_start_notebook_runtime_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.StartNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.start_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_start_notebook_runtime_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.start_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_start_notebook_runtime_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.start_notebook_runtime( + notebook_service.StartNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_start_notebook_runtime_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.start_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_start_notebook_runtime_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.start_notebook_runtime( + notebook_service.StartNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.CreateNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_create_notebook_runtime_template_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["notebook_runtime_template"] = { + "name": "name_value", + "display_name": "display_name_value", + "description": "description_value", + "is_default": True, + "machine_spec": { + "machine_type": "machine_type_value", + "accelerator_type": 1, + "accelerator_count": 1805, + "tpu_topology": "tpu_topology_value", + }, + "data_persistent_disk_spec": { + "disk_type": "disk_type_value", + "disk_size_gb": 1261, + }, + "network_spec": { + "enable_internet_access": True, + "network": "network_value", + "subnetwork": "subnetwork_value", + }, + "service_account": "service_account_value", + "etag": "etag_value", + "labels": {}, + "idle_shutdown_config": { + "idle_timeout": {"seconds": 751, "nanos": 543}, + "idle_shutdown_disabled": True, + }, + "euc_config": {"euc_disabled": True, "bypass_actas_check": True}, + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "notebook_runtime_type": 1, + "shielded_vm_config": {"enable_secure_boot": True}, + "network_tags": 
["network_tags_value1", "network_tags_value2"], + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = notebook_service.CreateNotebookRuntimeTemplateRequest.meta.fields[ + "notebook_runtime_template" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init[ + "notebook_runtime_template" + ].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + 
subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range( + 0, len(request_init["notebook_runtime_template"][field]) + ): + del request_init["notebook_runtime_template"][field][i][subfield] + else: + del request_init["notebook_runtime_template"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_notebook_runtime_template(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_create_notebook_runtime_template_rest_required_fields( + request_type=notebook_service.CreateNotebookRuntimeTemplateRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_notebook_runtime_template._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("notebook_runtime_template_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_notebook_runtime_template(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_notebook_runtime_template_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.create_notebook_runtime_template._get_unset_required_fields({}) + ) + assert set(unset_fields) == ( + set(("notebookRuntimeTemplateId",)) + & set( + ( + "parent", + "notebookRuntimeTemplate", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_notebook_runtime_template_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, + "post_create_notebook_runtime_template", + ) as post, 
mock.patch.object( + transports.NotebookServiceRestInterceptor, + "pre_create_notebook_runtime_template", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.CreateNotebookRuntimeTemplateRequest.pb( + notebook_service.CreateNotebookRuntimeTemplateRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.CreateNotebookRuntimeTemplateRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_notebook_runtime_template( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_notebook_runtime_template_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.CreateNotebookRuntimeTemplateRequest, +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_notebook_runtime_template(request) + + +def test_create_notebook_runtime_template_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_notebook_runtime_template(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/notebookRuntimeTemplates" + % client.transport._host, + args[1], + ) + + +def test_create_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_notebook_runtime_template( + notebook_service.CreateNotebookRuntimeTemplateRequest(), + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + +def test_create_notebook_runtime_template_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.GetNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_get_notebook_runtime_template_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = notebook_runtime.NotebookRuntimeTemplate( + name="name_value", + display_name="display_name_value", + description="description_value", + is_default=True, + service_account="service_account_value", + etag="etag_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_notebook_runtime_template(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, notebook_runtime.NotebookRuntimeTemplate) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.is_default is True + assert response.service_account == "service_account_value" + assert response.etag == "etag_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +def test_get_notebook_runtime_template_rest_required_fields( + request_type=notebook_service.GetNotebookRuntimeTemplateRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + 
).get_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = notebook_runtime.NotebookRuntimeTemplate() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_notebook_runtime_template(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_notebook_runtime_template_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_notebook_runtime_template._get_unset_required_fields( + {} + ) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_notebook_runtime_template_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_get_notebook_runtime_template" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_get_notebook_runtime_template" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.GetNotebookRuntimeTemplateRequest.pb( + 
notebook_service.GetNotebookRuntimeTemplateRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = notebook_runtime.NotebookRuntimeTemplate.to_json( + notebook_runtime.NotebookRuntimeTemplate() + ) + + request = notebook_service.GetNotebookRuntimeTemplateRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = notebook_runtime.NotebookRuntimeTemplate() + + client.get_notebook_runtime_template( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_notebook_runtime_template_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.GetNotebookRuntimeTemplateRequest, +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_notebook_runtime_template(request) + + +def test_get_notebook_runtime_template_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_runtime.NotebookRuntimeTemplate() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_notebook_runtime_template(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}" + % client.transport._host, + args[1], + ) + + +def test_get_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_notebook_runtime_template( + notebook_service.GetNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +def test_get_notebook_runtime_template_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.ListNotebookRuntimeTemplatesRequest, + dict, + ], +) +def test_list_notebook_runtime_templates_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_notebook_runtime_templates(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListNotebookRuntimeTemplatesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_notebook_runtime_templates_rest_required_fields( + request_type=notebook_service.ListNotebookRuntimeTemplatesRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_notebook_runtime_templates._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_notebook_runtime_templates._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + "read_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_notebook_runtime_templates(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_notebook_runtime_templates_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_notebook_runtime_templates._get_unset_required_fields( + {} + ) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + "readMask", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_notebook_runtime_templates_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with 
mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.NotebookServiceRestInterceptor, + "post_list_notebook_runtime_templates", + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_list_notebook_runtime_templates" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.ListNotebookRuntimeTemplatesRequest.pb( + notebook_service.ListNotebookRuntimeTemplatesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + notebook_service.ListNotebookRuntimeTemplatesResponse.to_json( + notebook_service.ListNotebookRuntimeTemplatesResponse() + ) + ) + + request = notebook_service.ListNotebookRuntimeTemplatesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + + client.list_notebook_runtime_templates( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_notebook_runtime_templates_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.ListNotebookRuntimeTemplatesRequest, +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_notebook_runtime_templates(request) + + +def test_list_notebook_runtime_templates_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_notebook_runtime_templates(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/notebookRuntimeTemplates" + % client.transport._host, + args[1], + ) + + +def test_list_notebook_runtime_templates_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_notebook_runtime_templates( + notebook_service.ListNotebookRuntimeTemplatesRequest(), + parent="parent_value", + ) + + +def test_list_notebook_runtime_templates_rest_pager(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + notebook_service.ListNotebookRuntimeTemplatesResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_notebook_runtime_templates(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, notebook_runtime.NotebookRuntimeTemplate) for i in results + ) + + pages = list( + client.list_notebook_runtime_templates(request=sample_request).pages + ) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.DeleteNotebookRuntimeTemplateRequest, + dict, + ], +) +def 
test_delete_notebook_runtime_template_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_notebook_runtime_template(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_delete_notebook_runtime_template_rest_required_fields( + request_type=notebook_service.DeleteNotebookRuntimeTemplateRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_notebook_runtime_template(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_notebook_runtime_template_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.delete_notebook_runtime_template._get_unset_required_fields({}) + ) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_notebook_runtime_template_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, + "post_delete_notebook_runtime_template", + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, + "pre_delete_notebook_runtime_template", + ) as pre: + 
pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.DeleteNotebookRuntimeTemplateRequest.pb( + notebook_service.DeleteNotebookRuntimeTemplateRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.DeleteNotebookRuntimeTemplateRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_notebook_runtime_template( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_notebook_runtime_template_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.DeleteNotebookRuntimeTemplateRequest, +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_notebook_runtime_template(request) + + +def test_delete_notebook_runtime_template_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_notebook_runtime_template(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_notebook_runtime_template( + notebook_service.DeleteNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +def test_delete_notebook_runtime_template_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.AssignNotebookRuntimeRequest, + dict, + ], +) +def test_assign_notebook_runtime_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.assign_notebook_runtime(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_assign_notebook_runtime_rest_required_fields( + request_type=notebook_service.AssignNotebookRuntimeRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["notebook_runtime_template"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).assign_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["notebookRuntimeTemplate"] = "notebook_runtime_template_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).assign_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "notebookRuntimeTemplate" in jsonified_request + assert ( + jsonified_request["notebookRuntimeTemplate"] + == "notebook_runtime_template_value" + ) + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.assign_notebook_runtime(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_assign_notebook_runtime_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.assign_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "notebookRuntimeTemplate", + "notebookRuntime", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_assign_notebook_runtime_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + 
operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_assign_notebook_runtime" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_assign_notebook_runtime" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.AssignNotebookRuntimeRequest.pb( + notebook_service.AssignNotebookRuntimeRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.AssignNotebookRuntimeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.assign_notebook_runtime( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_assign_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.AssignNotebookRuntimeRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.assign_notebook_runtime(request) + + +def test_assign_notebook_runtime_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.assign_notebook_runtime(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/notebookRuntimes:assign" + % client.transport._host, + args[1], + ) + + +def test_assign_notebook_runtime_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.assign_notebook_runtime( + notebook_service.AssignNotebookRuntimeRequest(), + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", + ) + + +def test_assign_notebook_runtime_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.GetNotebookRuntimeRequest, + dict, + ], +) +def test_get_notebook_runtime_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = notebook_runtime.NotebookRuntime( + name="name_value", + runtime_user="runtime_user_value", + proxy_uri="proxy_uri_value", + health_state=notebook_runtime.NotebookRuntime.HealthState.HEALTHY, + display_name="display_name_value", + description="description_value", + service_account="service_account_value", + runtime_state=notebook_runtime.NotebookRuntime.RuntimeState.RUNNING, + is_upgradable=True, + version="version_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntime.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_notebook_runtime(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, notebook_runtime.NotebookRuntime) + assert response.name == "name_value" + assert response.runtime_user == "runtime_user_value" + assert response.proxy_uri == "proxy_uri_value" + assert response.health_state == notebook_runtime.NotebookRuntime.HealthState.HEALTHY + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.service_account == "service_account_value" + assert ( + response.runtime_state == notebook_runtime.NotebookRuntime.RuntimeState.RUNNING + ) + assert response.is_upgradable is True + assert response.version == "version_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +def test_get_notebook_runtime_rest_required_fields( + request_type=notebook_service.GetNotebookRuntimeRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = notebook_runtime.NotebookRuntime() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntime.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_notebook_runtime(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_notebook_runtime_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_notebook_runtime_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if 
null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_get_notebook_runtime" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_get_notebook_runtime" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.GetNotebookRuntimeRequest.pb( + notebook_service.GetNotebookRuntimeRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = notebook_runtime.NotebookRuntime.to_json( + notebook_runtime.NotebookRuntime() + ) + + request = notebook_service.GetNotebookRuntimeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = notebook_runtime.NotebookRuntime() + + client.get_notebook_runtime( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.GetNotebookRuntimeRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_notebook_runtime(request) + + +def test_get_notebook_runtime_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_runtime.NotebookRuntime() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntime.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_notebook_runtime(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/notebookRuntimes/*}" + % client.transport._host, + args[1], + ) + + +def test_get_notebook_runtime_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_notebook_runtime( + notebook_service.GetNotebookRuntimeRequest(), + name="name_value", + ) + + +def test_get_notebook_runtime_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.ListNotebookRuntimesRequest, + dict, + ], +) +def test_list_notebook_runtimes_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = notebook_service.ListNotebookRuntimesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_notebook_runtimes(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotebookRuntimesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_notebook_runtimes_rest_required_fields( + request_type=notebook_service.ListNotebookRuntimesRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_notebook_runtimes._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_notebook_runtimes._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + "read_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = notebook_service.ListNotebookRuntimesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_notebook_runtimes(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_notebook_runtimes_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_notebook_runtimes._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + "readMask", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_notebook_runtimes_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_list_notebook_runtimes" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_list_notebook_runtimes" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.ListNotebookRuntimesRequest.pb( + 
notebook_service.ListNotebookRuntimesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + notebook_service.ListNotebookRuntimesResponse.to_json( + notebook_service.ListNotebookRuntimesResponse() + ) + ) + + request = notebook_service.ListNotebookRuntimesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = notebook_service.ListNotebookRuntimesResponse() + + client.list_notebook_runtimes( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_notebook_runtimes_rest_bad_request( + transport: str = "rest", request_type=notebook_service.ListNotebookRuntimesRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_notebook_runtimes(request) + + +def test_list_notebook_runtimes_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_service.ListNotebookRuntimesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_notebook_runtimes(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/notebookRuntimes" + % client.transport._host, + args[1], + ) + + +def test_list_notebook_runtimes_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_notebook_runtimes( + notebook_service.ListNotebookRuntimesRequest(), + parent="parent_value", + ) + + +def test_list_notebook_runtimes_rest_pager(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + notebook_service.ListNotebookRuntimesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_notebook_runtimes(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, notebook_runtime.NotebookRuntime) for i in results) + + pages = list(client.list_notebook_runtimes(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.DeleteNotebookRuntimeRequest, + dict, + ], +) +def test_delete_notebook_runtime_rest(request_type): + client = NotebookServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_notebook_runtime(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_delete_notebook_runtime_rest_required_fields( + request_type=notebook_service.DeleteNotebookRuntimeRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required 
fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_notebook_runtime(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_notebook_runtime_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_notebook_runtime_rest_interceptors(null_interceptor): + transport = 
transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_delete_notebook_runtime" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_delete_notebook_runtime" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.DeleteNotebookRuntimeRequest.pb( + notebook_service.DeleteNotebookRuntimeRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.DeleteNotebookRuntimeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_notebook_runtime( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.DeleteNotebookRuntimeRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = 
request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_notebook_runtime(request) + + +def test_delete_notebook_runtime_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_notebook_runtime(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/notebookRuntimes/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_notebook_runtime_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_notebook_runtime( + notebook_service.DeleteNotebookRuntimeRequest(), + name="name_value", + ) + + +def test_delete_notebook_runtime_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.UpgradeNotebookRuntimeRequest, + dict, + ], +) +def test_upgrade_notebook_runtime_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.upgrade_notebook_runtime(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_upgrade_notebook_runtime_rest_required_fields( + request_type=notebook_service.UpgradeNotebookRuntimeRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).upgrade_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).upgrade_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.upgrade_notebook_runtime(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_upgrade_notebook_runtime_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.upgrade_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_upgrade_notebook_runtime_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_upgrade_notebook_runtime" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_upgrade_notebook_runtime" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.UpgradeNotebookRuntimeRequest.pb( + notebook_service.UpgradeNotebookRuntimeRequest() + ) + transcode.return_value = { + 
"method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.UpgradeNotebookRuntimeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.upgrade_notebook_runtime( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_upgrade_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.UpgradeNotebookRuntimeRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.upgrade_notebook_runtime(request) + + +def test_upgrade_notebook_runtime_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.upgrade_notebook_runtime(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/notebookRuntimes/*}:upgrade" + % client.transport._host, + args[1], + ) + + +def test_upgrade_notebook_runtime_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.upgrade_notebook_runtime( + notebook_service.UpgradeNotebookRuntimeRequest(), + name="name_value", + ) + + +def test_upgrade_notebook_runtime_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.StartNotebookRuntimeRequest, + dict, + ], +) +def test_start_notebook_runtime_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.start_notebook_runtime(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_start_notebook_runtime_rest_required_fields( + request_type=notebook_service.StartNotebookRuntimeRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).start_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).start_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.start_notebook_runtime(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_start_notebook_runtime_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.start_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_start_notebook_runtime_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_start_notebook_runtime" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_start_notebook_runtime" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.StartNotebookRuntimeRequest.pb( + notebook_service.StartNotebookRuntimeRequest() + ) + transcode.return_value = { + "method": "post", + 
"uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.StartNotebookRuntimeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.start_notebook_runtime( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_start_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.StartNotebookRuntimeRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.start_notebook_runtime(request) + + +def test_start_notebook_runtime_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.start_notebook_runtime(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/notebookRuntimes/*}:start" + % client.transport._host, + args[1], + ) + + +def test_start_notebook_runtime_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.start_notebook_runtime( + notebook_service.StartNotebookRuntimeRequest(), + name="name_value", + ) + + +def test_start_notebook_runtime_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.NotebookServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.NotebookServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NotebookServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.NotebookServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = NotebookServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = NotebookServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.NotebookServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NotebookServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.NotebookServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = NotebookServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.NotebookServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.NotebookServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceGrpcAsyncIOTransport, + transports.NotebookServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = NotebookServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.NotebookServiceGrpcTransport, + ) + + +def test_notebook_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.NotebookServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_notebook_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.aiplatform_v1.services.notebook_service.transports.NotebookServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.NotebookServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_notebook_runtime_template", + "get_notebook_runtime_template", + "list_notebook_runtime_templates", + "delete_notebook_runtime_template", + "assign_notebook_runtime", + "get_notebook_runtime", + "list_notebook_runtimes", + "delete_notebook_runtime", + "upgrade_notebook_runtime", + "start_notebook_runtime", + "set_iam_policy", + "get_iam_policy", + "test_iam_permissions", + "get_location", + "list_locations", + "get_operation", + "wait_operation", + "cancel_operation", + "delete_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_notebook_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1.services.notebook_service.transports.NotebookServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NotebookServiceTransport( + 
credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_notebook_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1.services.notebook_service.transports.NotebookServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NotebookServiceTransport() + adc.assert_called_once() + + +def test_notebook_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + NotebookServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceGrpcAsyncIOTransport, + ], +) +def test_notebook_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceGrpcAsyncIOTransport, + transports.NotebookServiceRestTransport, + ], +) +def test_notebook_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.NotebookServiceGrpcTransport, grpc_helpers), + (transports.NotebookServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_notebook_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceGrpcAsyncIOTransport, + ], +) +def test_notebook_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_notebook_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.NotebookServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_notebook_service_rest_lro_client(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_notebook_service_host_no_port(transport_name): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_notebook_service_host_with_port(transport_name): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_notebook_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = NotebookServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = NotebookServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_notebook_runtime_template._session + session2 = client2.transport.create_notebook_runtime_template._session + assert session1 != session2 + session1 = client1.transport.get_notebook_runtime_template._session + session2 = client2.transport.get_notebook_runtime_template._session + assert session1 != session2 + session1 = 
client1.transport.list_notebook_runtime_templates._session + session2 = client2.transport.list_notebook_runtime_templates._session + assert session1 != session2 + session1 = client1.transport.delete_notebook_runtime_template._session + session2 = client2.transport.delete_notebook_runtime_template._session + assert session1 != session2 + session1 = client1.transport.assign_notebook_runtime._session + session2 = client2.transport.assign_notebook_runtime._session + assert session1 != session2 + session1 = client1.transport.get_notebook_runtime._session + session2 = client2.transport.get_notebook_runtime._session + assert session1 != session2 + session1 = client1.transport.list_notebook_runtimes._session + session2 = client2.transport.list_notebook_runtimes._session + assert session1 != session2 + session1 = client1.transport.delete_notebook_runtime._session + session2 = client2.transport.delete_notebook_runtime._session + assert session1 != session2 + session1 = client1.transport.upgrade_notebook_runtime._session + session2 = client2.transport.upgrade_notebook_runtime._session + assert session1 != session2 + session1 = client1.transport.start_notebook_runtime._session + session2 = client2.transport.start_notebook_runtime._session + assert session1 != session2 + + +def test_notebook_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.NotebookServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_notebook_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.NotebookServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceGrpcAsyncIOTransport, + ], +) +def test_notebook_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, 
client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceGrpcAsyncIOTransport, + ], +) +def test_notebook_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_notebook_service_grpc_lro_client(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_notebook_service_grpc_lro_async_client(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_network_path(): + project = "squid" + network = "clam" + expected = "projects/{project}/global/networks/{network}".format( + project=project, + network=network, + ) + actual = NotebookServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "whelk", + "network": "octopus", + } + path = NotebookServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = NotebookServiceClient.parse_network_path(path) + assert expected == actual + + +def test_notebook_runtime_path(): + project = "oyster" + location = "nudibranch" + notebook_runtime = "cuttlefish" + expected = "projects/{project}/locations/{location}/notebookRuntimes/{notebook_runtime}".format( + project=project, + location=location, + notebook_runtime=notebook_runtime, + ) + actual = NotebookServiceClient.notebook_runtime_path( + project, location, notebook_runtime + ) + assert expected == actual + + +def test_parse_notebook_runtime_path(): + expected = { + "project": "mussel", + "location": "winkle", + "notebook_runtime": "nautilus", + } + path = NotebookServiceClient.notebook_runtime_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NotebookServiceClient.parse_notebook_runtime_path(path) + assert expected == actual + + +def test_notebook_runtime_template_path(): + project = "scallop" + location = "abalone" + notebook_runtime_template = "squid" + expected = "projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}".format( + project=project, + location=location, + notebook_runtime_template=notebook_runtime_template, + ) + actual = NotebookServiceClient.notebook_runtime_template_path( + project, location, notebook_runtime_template + ) + assert expected == actual + + +def test_parse_notebook_runtime_template_path(): + expected = { + "project": "clam", + "location": "whelk", + "notebook_runtime_template": "octopus", + } + path = NotebookServiceClient.notebook_runtime_template_path(**expected) + + # Check that the path construction is reversible. + actual = NotebookServiceClient.parse_notebook_runtime_template_path(path) + assert expected == actual + + +def test_subnetwork_path(): + project = "oyster" + region = "nudibranch" + subnetwork = "cuttlefish" + expected = "projects/{project}/regions/{region}/subnetworks/{subnetwork}".format( + project=project, + region=region, + subnetwork=subnetwork, + ) + actual = NotebookServiceClient.subnetwork_path(project, region, subnetwork) + assert expected == actual + + +def test_parse_subnetwork_path(): + expected = { + "project": "mussel", + "region": "winkle", + "subnetwork": "nautilus", + } + path = NotebookServiceClient.subnetwork_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NotebookServiceClient.parse_subnetwork_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "scallop" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = NotebookServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = NotebookServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = NotebookServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "squid" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = NotebookServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "clam", + } + path = NotebookServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = NotebookServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "whelk" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = NotebookServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "octopus", + } + path = NotebookServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NotebookServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "oyster" + expected = "projects/{project}".format( + project=project, + ) + actual = NotebookServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = NotebookServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = NotebookServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = NotebookServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = NotebookServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NotebookServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.NotebookServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.NotebookServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = NotebookServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_get_location_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.GetLocationRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_location(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.GetLocationRequest, + dict, + ], +) +def test_get_location_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.Location() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_location(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_list_locations_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.ListLocationsRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({"name": "projects/sample1"}, request) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_locations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.ListLocationsRequest, + dict, + ], +) +def test_list_locations_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_locations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_get_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_set_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_test_iam_permissions_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + +def test_cancel_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.CancelOperationRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.DeleteOperationRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.DeleteOperationRequest, + dict, + ], +) +def test_delete_operation_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.ListOperationsRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_wait_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.WaitOperationRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.WaitOperationRequest, + dict, + ], +) +def test_wait_operation_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.wait_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_delete_operation(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_delete_operation_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_wait_operation(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_wait_operation(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_wait_operation_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_wait_operation_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_list_locations_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_locations_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) + + +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_get_location_field_headers(): + client = NotebookServiceClient(credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations/abc", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations/abc", + ) in kw["metadata"] + + +def test_get_location_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + + response = client.get_location( + request={ + "name": "locations/abc", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_location_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_set_iam_policy_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +def test_get_iam_policy(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_test_iam_permissions(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (NotebookServiceClient, transports.NotebookServiceGrpcTransport), + (NotebookServiceAsyncClient, transports.NotebookServiceGrpcAsyncIOTransport), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + 
scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py b/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py new file mode 100644 index 0000000000..9ff55ff500 --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1/test_persistent_resource_service.py @@ -0,0 +1,8272 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from google.api_core import api_core_version +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1.services.persistent_resource_service import ( + PersistentResourceServiceAsyncClient, +) +from google.cloud.aiplatform_v1.services.persistent_resource_service import ( + PersistentResourceServiceClient, +) +from google.cloud.aiplatform_v1.services.persistent_resource_service import pagers +from google.cloud.aiplatform_v1.services.persistent_resource_service import transports +from google.cloud.aiplatform_v1.types import accelerator_type +from google.cloud.aiplatform_v1.types import encryption_spec +from google.cloud.aiplatform_v1.types import machine_resources +from google.cloud.aiplatform_v1.types import operation as gca_operation +from google.cloud.aiplatform_v1.types import 
persistent_resource +from google.cloud.aiplatform_v1.types import ( + persistent_resource as gca_persistent_resource, +) +from google.cloud.aiplatform_v1.types import persistent_resource_service +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import any_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PersistentResourceServiceClient._get_default_mtls_endpoint(None) is None + assert ( + PersistentResourceServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + PersistentResourceServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + PersistentResourceServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + PersistentResourceServiceClient._get_default_mtls_endpoint( + sandbox_mtls_endpoint + ) + == sandbox_mtls_endpoint + ) + assert ( + PersistentResourceServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +def test__read_environment_variables(): + assert PersistentResourceServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert PersistentResourceServiceClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert PersistentResourceServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + PersistentResourceServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must 
be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert PersistentResourceServiceClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert PersistentResourceServiceClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert PersistentResourceServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + PersistentResourceServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert PersistentResourceServiceClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert PersistentResourceServiceClient._get_client_cert_source(None, False) is None + assert ( + PersistentResourceServiceClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + PersistentResourceServiceClient._get_client_cert_source( + mock_provided_cert_source, True + ) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + PersistentResourceServiceClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + 
PersistentResourceServiceClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + PersistentResourceServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(PersistentResourceServiceClient), +) +@mock.patch.object( + PersistentResourceServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(PersistentResourceServiceAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = PersistentResourceServiceClient._DEFAULT_UNIVERSE + default_endpoint = ( + PersistentResourceServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + ) + mock_universe = "bar.com" + mock_endpoint = PersistentResourceServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + PersistentResourceServiceClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + PersistentResourceServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == PersistentResourceServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + PersistentResourceServiceClient._get_api_endpoint( + None, None, default_universe, "auto" + ) + == default_endpoint + ) + assert ( + PersistentResourceServiceClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == PersistentResourceServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + PersistentResourceServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == PersistentResourceServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + PersistentResourceServiceClient._get_api_endpoint( + None, None, mock_universe, "never" + ) + == mock_endpoint + ) + assert ( + PersistentResourceServiceClient._get_api_endpoint( + None, None, default_universe, "never" + ) 
+ == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + PersistentResourceServiceClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + PersistentResourceServiceClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + PersistentResourceServiceClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + PersistentResourceServiceClient._get_universe_domain(None, None) + == PersistentResourceServiceClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + PersistentResourceServiceClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceGrpcTransport, + "grpc", + ), + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceRestTransport, + "rest", + ), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. 
+ channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. + google_auth_major, google_auth_minor = [ + int(part) for part in google.auth.__version__.split(".")[0:2] + ] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. + client = client_class(transport=transport_class(credentials=credentials)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. 
+ api_core_major, api_core_minor = [ + int(part) for part in api_core_version.__version__.split(".")[0:2] + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (PersistentResourceServiceClient, "grpc"), + (PersistentResourceServiceAsyncClient, "grpc_asyncio"), + (PersistentResourceServiceClient, "rest"), + ], +) +def test_persistent_resource_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.PersistentResourceServiceGrpcTransport, "grpc"), + (transports.PersistentResourceServiceGrpcAsyncIOTransport, "grpc_asyncio"), + 
(transports.PersistentResourceServiceRestTransport, "rest"), + ], +) +def test_persistent_resource_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (PersistentResourceServiceClient, "grpc"), + (PersistentResourceServiceAsyncClient, "grpc_asyncio"), + (PersistentResourceServiceClient, "rest"), + ], +) +def test_persistent_resource_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +def test_persistent_resource_service_client_get_transport_class(): + transport = PersistentResourceServiceClient.get_transport_class() + available_transports = [ + 
transports.PersistentResourceServiceGrpcTransport, + transports.PersistentResourceServiceRestTransport, + ] + assert transport in available_transports + + transport = PersistentResourceServiceClient.get_transport_class("grpc") + assert transport == transports.PersistentResourceServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceGrpcTransport, + "grpc", + ), + ( + PersistentResourceServiceAsyncClient, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceRestTransport, + "rest", + ), + ], +) +@mock.patch.object( + PersistentResourceServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(PersistentResourceServiceClient), +) +@mock.patch.object( + PersistentResourceServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(PersistentResourceServiceAsyncClient), +) +def test_persistent_resource_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object( + PersistentResourceServiceClient, "get_transport_class" + ) as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object( + PersistentResourceServiceClient, "get_transport_class" + ) as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + 
always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceGrpcTransport, + "grpc", + "true", + ), + ( + PersistentResourceServiceAsyncClient, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceGrpcTransport, + "grpc", + "false", + ), + ( + PersistentResourceServiceAsyncClient, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceRestTransport, + "rest", + "true", + ), + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + PersistentResourceServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(PersistentResourceServiceClient), +) +@mock.patch.object( + PersistentResourceServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(PersistentResourceServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_persistent_resource_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", + [PersistentResourceServiceClient, PersistentResourceServiceAsyncClient], +) +@mock.patch.object( + PersistentResourceServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(PersistentResourceServiceClient), +) +@mock.patch.object( + PersistentResourceServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(PersistentResourceServiceAsyncClient), +) +def test_persistent_resource_service_client_get_mtls_endpoint_and_cert_source( + client_class, +): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", + [PersistentResourceServiceClient, PersistentResourceServiceAsyncClient], +) +@mock.patch.object( + PersistentResourceServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(PersistentResourceServiceClient), +) +@mock.patch.object( + PersistentResourceServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(PersistentResourceServiceAsyncClient), +) +def test_persistent_resource_service_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = PersistentResourceServiceClient._DEFAULT_UNIVERSE + default_endpoint = ( + PersistentResourceServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + ) + mock_universe = "bar.com" + mock_endpoint = PersistentResourceServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceGrpcTransport, + "grpc", + ), + ( + PersistentResourceServiceAsyncClient, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceRestTransport, + "rest", + ), + ], +) +def test_persistent_resource_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + PersistentResourceServiceAsyncClient, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceRestTransport, + "rest", + None, + ), + ], +) +def test_persistent_resource_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_persistent_resource_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1.services.persistent_resource_service.transports.PersistentResourceServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = PersistentResourceServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + PersistentResourceServiceAsyncClient, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_persistent_resource_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.CreatePersistentResourceRequest, + dict, + ], +) +def test_create_persistent_resource(request_type, transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as 
the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.CreatePersistentResourceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_persistent_resource_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_persistent_resource), "__call__" + ) as call: + client.create_persistent_resource() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.CreatePersistentResourceRequest() + + +def test_create_persistent_resource_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = persistent_resource_service.CreatePersistentResourceRequest( + parent="parent_value", + persistent_resource_id="persistent_resource_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_persistent_resource), "__call__" + ) as call: + client.create_persistent_resource(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.CreatePersistentResourceRequest( + parent="parent_value", + persistent_resource_id="persistent_resource_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_persistent_resource_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_persistent_resource() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.CreatePersistentResourceRequest() + + +@pytest.mark.asyncio +async def test_create_persistent_resource_async( + transport: str = "grpc_asyncio", + request_type=persistent_resource_service.CreatePersistentResourceRequest, +): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.CreatePersistentResourceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_persistent_resource_async_from_dict(): + await test_create_persistent_resource_async(request_type=dict) + + +def test_create_persistent_resource_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = persistent_resource_service.CreatePersistentResourceRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_persistent_resource), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_persistent_resource_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = persistent_resource_service.CreatePersistentResourceRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_persistent_resource), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_persistent_resource_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_persistent_resource( + parent="parent_value", + persistent_resource=gca_persistent_resource.PersistentResource( + name="name_value" + ), + persistent_resource_id="persistent_resource_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].persistent_resource + mock_val = gca_persistent_resource.PersistentResource(name="name_value") + assert arg == mock_val + arg = args[0].persistent_resource_id + mock_val = "persistent_resource_id_value" + assert arg == mock_val + + +def test_create_persistent_resource_flattened_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_persistent_resource( + persistent_resource_service.CreatePersistentResourceRequest(), + parent="parent_value", + persistent_resource=gca_persistent_resource.PersistentResource( + name="name_value" + ), + persistent_resource_id="persistent_resource_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_persistent_resource_flattened_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_persistent_resource( + parent="parent_value", + persistent_resource=gca_persistent_resource.PersistentResource( + name="name_value" + ), + persistent_resource_id="persistent_resource_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].persistent_resource + mock_val = gca_persistent_resource.PersistentResource(name="name_value") + assert arg == mock_val + arg = args[0].persistent_resource_id + mock_val = "persistent_resource_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_persistent_resource_flattened_error_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_persistent_resource( + persistent_resource_service.CreatePersistentResourceRequest(), + parent="parent_value", + persistent_resource=gca_persistent_resource.PersistentResource( + name="name_value" + ), + persistent_resource_id="persistent_resource_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.GetPersistentResourceRequest, + dict, + ], +) +def test_get_persistent_resource(request_type, transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = persistent_resource.PersistentResource( + name="name_value", + display_name="display_name_value", + state=persistent_resource.PersistentResource.State.PROVISIONING, + network="network_value", + reserved_ip_ranges=["reserved_ip_ranges_value"], + ) + response = client.get_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.GetPersistentResourceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, persistent_resource.PersistentResource) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == persistent_resource.PersistentResource.State.PROVISIONING + assert response.network == "network_value" + assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"] + + +def test_get_persistent_resource_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_persistent_resource), "__call__" + ) as call: + client.get_persistent_resource() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.GetPersistentResourceRequest() + + +def test_get_persistent_resource_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = persistent_resource_service.GetPersistentResourceRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_persistent_resource), "__call__" + ) as call: + client.get_persistent_resource(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.GetPersistentResourceRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_persistent_resource_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + persistent_resource.PersistentResource( + name="name_value", + display_name="display_name_value", + state=persistent_resource.PersistentResource.State.PROVISIONING, + network="network_value", + reserved_ip_ranges=["reserved_ip_ranges_value"], + ) + ) + response = await client.get_persistent_resource() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.GetPersistentResourceRequest() + + +@pytest.mark.asyncio +async def test_get_persistent_resource_async( + transport: str = "grpc_asyncio", + request_type=persistent_resource_service.GetPersistentResourceRequest, +): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + persistent_resource.PersistentResource( + name="name_value", + display_name="display_name_value", + state=persistent_resource.PersistentResource.State.PROVISIONING, + network="network_value", + reserved_ip_ranges=["reserved_ip_ranges_value"], + ) + ) + response = await client.get_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.GetPersistentResourceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, persistent_resource.PersistentResource) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == persistent_resource.PersistentResource.State.PROVISIONING + assert response.network == "network_value" + assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"] + + +@pytest.mark.asyncio +async def test_get_persistent_resource_async_from_dict(): + await test_get_persistent_resource_async(request_type=dict) + + +def test_get_persistent_resource_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = persistent_resource_service.GetPersistentResourceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_persistent_resource), "__call__" + ) as call: + call.return_value = persistent_resource.PersistentResource() + client.get_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_persistent_resource_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = persistent_resource_service.GetPersistentResourceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_persistent_resource), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + persistent_resource.PersistentResource() + ) + await client.get_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_persistent_resource_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = persistent_resource.PersistentResource() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_persistent_resource( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_persistent_resource_flattened_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_persistent_resource( + persistent_resource_service.GetPersistentResourceRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_persistent_resource_flattened_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = persistent_resource.PersistentResource() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + persistent_resource.PersistentResource() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_persistent_resource( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_persistent_resource_flattened_error_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_persistent_resource( + persistent_resource_service.GetPersistentResourceRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.ListPersistentResourcesRequest, + dict, + ], +) +def test_list_persistent_resources(request_type, transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_persistent_resources), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = persistent_resource_service.ListPersistentResourcesResponse( + next_page_token="next_page_token_value", + ) + response = client.list_persistent_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.ListPersistentResourcesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPersistentResourcesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_persistent_resources_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_persistent_resources), "__call__" + ) as call: + client.list_persistent_resources() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.ListPersistentResourcesRequest() + + +def test_list_persistent_resources_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = persistent_resource_service.ListPersistentResourcesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_persistent_resources), "__call__" + ) as call: + client.list_persistent_resources(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.ListPersistentResourcesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +@pytest.mark.asyncio +async def test_list_persistent_resources_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_persistent_resources), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + persistent_resource_service.ListPersistentResourcesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_persistent_resources() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.ListPersistentResourcesRequest() + + +@pytest.mark.asyncio +async def test_list_persistent_resources_async( + transport: str = "grpc_asyncio", + request_type=persistent_resource_service.ListPersistentResourcesRequest, +): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_persistent_resources), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + persistent_resource_service.ListPersistentResourcesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_persistent_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.ListPersistentResourcesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPersistentResourcesAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_persistent_resources_async_from_dict(): + await test_list_persistent_resources_async(request_type=dict) + + +def test_list_persistent_resources_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = persistent_resource_service.ListPersistentResourcesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_persistent_resources), "__call__" + ) as call: + call.return_value = ( + persistent_resource_service.ListPersistentResourcesResponse() + ) + client.list_persistent_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_persistent_resources_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = persistent_resource_service.ListPersistentResourcesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_persistent_resources), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + persistent_resource_service.ListPersistentResourcesResponse() + ) + await client.list_persistent_resources(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_persistent_resources_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_persistent_resources), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + persistent_resource_service.ListPersistentResourcesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_persistent_resources( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_persistent_resources_flattened_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_persistent_resources( + persistent_resource_service.ListPersistentResourcesRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_persistent_resources_flattened_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_persistent_resources), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = ( + persistent_resource_service.ListPersistentResourcesResponse() + ) + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + persistent_resource_service.ListPersistentResourcesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_persistent_resources( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_persistent_resources_flattened_error_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_persistent_resources( + persistent_resource_service.ListPersistentResourcesRequest(), + parent="parent_value", + ) + + +def test_list_persistent_resources_pager(transport_name: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_persistent_resources), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + ], + next_page_token="abc", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[], + next_page_token="def", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + ], + next_page_token="ghi", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_persistent_resources(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, persistent_resource.PersistentResource) for i in results + ) + + +def test_list_persistent_resources_pages(transport_name: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_persistent_resources), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + ], + next_page_token="abc", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[], + next_page_token="def", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + ], + next_page_token="ghi", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + ], + ), + RuntimeError, + ) + pages = list(client.list_persistent_resources(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_persistent_resources_async_pager(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_persistent_resources), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + ], + next_page_token="abc", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[], + next_page_token="def", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + ], + next_page_token="ghi", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_persistent_resources( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, persistent_resource.PersistentResource) for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_persistent_resources_async_pages(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_persistent_resources), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + ], + next_page_token="abc", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[], + next_page_token="def", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + ], + next_page_token="ghi", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_persistent_resources(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.DeletePersistentResourceRequest, + dict, + ], +) +def test_delete_persistent_resource(request_type, transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.DeletePersistentResourceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_persistent_resource_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_persistent_resource), "__call__" + ) as call: + client.delete_persistent_resource() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.DeletePersistentResourceRequest() + + +def test_delete_persistent_resource_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = persistent_resource_service.DeletePersistentResourceRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_persistent_resource), "__call__" + ) as call: + client.delete_persistent_resource(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.DeletePersistentResourceRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_persistent_resource_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_persistent_resource() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.DeletePersistentResourceRequest() + + +@pytest.mark.asyncio +async def test_delete_persistent_resource_async( + transport: str = "grpc_asyncio", + request_type=persistent_resource_service.DeletePersistentResourceRequest, +): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.DeletePersistentResourceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_persistent_resource_async_from_dict(): + await test_delete_persistent_resource_async(request_type=dict) + + +def test_delete_persistent_resource_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = persistent_resource_service.DeletePersistentResourceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_persistent_resource), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_persistent_resource_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = persistent_resource_service.DeletePersistentResourceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_persistent_resource), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_persistent_resource_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_persistent_resource( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_persistent_resource_flattened_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_persistent_resource( + persistent_resource_service.DeletePersistentResourceRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_persistent_resource_flattened_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_persistent_resource( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_persistent_resource_flattened_error_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_persistent_resource( + persistent_resource_service.DeletePersistentResourceRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.UpdatePersistentResourceRequest, + dict, + ], +) +def test_update_persistent_resource(request_type, transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.update_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.UpdatePersistentResourceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_update_persistent_resource_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_persistent_resource), "__call__" + ) as call: + client.update_persistent_resource() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.UpdatePersistentResourceRequest() + + +def test_update_persistent_resource_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = persistent_resource_service.UpdatePersistentResourceRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_persistent_resource), "__call__" + ) as call: + client.update_persistent_resource(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.UpdatePersistentResourceRequest() + + +@pytest.mark.asyncio +async def test_update_persistent_resource_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_persistent_resource() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.UpdatePersistentResourceRequest() + + +@pytest.mark.asyncio +async def test_update_persistent_resource_async( + transport: str = "grpc_asyncio", + request_type=persistent_resource_service.UpdatePersistentResourceRequest, +): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.update_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.UpdatePersistentResourceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_persistent_resource_async_from_dict(): + await test_update_persistent_resource_async(request_type=dict) + + +def test_update_persistent_resource_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = persistent_resource_service.UpdatePersistentResourceRequest() + + request.persistent_resource.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_persistent_resource), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.update_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "persistent_resource.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_persistent_resource_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = persistent_resource_service.UpdatePersistentResourceRequest() + + request.persistent_resource.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_persistent_resource), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.update_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "persistent_resource.name=name_value", + ) in kw["metadata"] + + +def test_update_persistent_resource_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_persistent_resource( + persistent_resource=gca_persistent_resource.PersistentResource( + name="name_value" + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].persistent_resource + mock_val = gca_persistent_resource.PersistentResource(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_persistent_resource_flattened_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_persistent_resource( + persistent_resource_service.UpdatePersistentResourceRequest(), + persistent_resource=gca_persistent_resource.PersistentResource( + name="name_value" + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_persistent_resource_flattened_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_persistent_resource( + persistent_resource=gca_persistent_resource.PersistentResource( + name="name_value" + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].persistent_resource + mock_val = gca_persistent_resource.PersistentResource(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_persistent_resource_flattened_error_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.update_persistent_resource( + persistent_resource_service.UpdatePersistentResourceRequest(), + persistent_resource=gca_persistent_resource.PersistentResource( + name="name_value" + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.RebootPersistentResourceRequest, + dict, + ], +) +def test_reboot_persistent_resource(request_type, transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.reboot_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.RebootPersistentResourceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_reboot_persistent_resource_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + client.reboot_persistent_resource() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.RebootPersistentResourceRequest() + + +def test_reboot_persistent_resource_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = persistent_resource_service.RebootPersistentResourceRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + client.reboot_persistent_resource(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.RebootPersistentResourceRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_reboot_persistent_resource_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.reboot_persistent_resource() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.RebootPersistentResourceRequest() + + +@pytest.mark.asyncio +async def test_reboot_persistent_resource_async( + transport: str = "grpc_asyncio", + request_type=persistent_resource_service.RebootPersistentResourceRequest, +): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.reboot_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.RebootPersistentResourceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_reboot_persistent_resource_async_from_dict(): + await test_reboot_persistent_resource_async(request_type=dict) + + +def test_reboot_persistent_resource_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = persistent_resource_service.RebootPersistentResourceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.reboot_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_reboot_persistent_resource_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = persistent_resource_service.RebootPersistentResourceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.reboot_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_reboot_persistent_resource_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.reboot_persistent_resource( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_reboot_persistent_resource_flattened_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.reboot_persistent_resource( + persistent_resource_service.RebootPersistentResourceRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_reboot_persistent_resource_flattened_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.reboot_persistent_resource( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_reboot_persistent_resource_flattened_error_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.reboot_persistent_resource( + persistent_resource_service.RebootPersistentResourceRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.CreatePersistentResourceRequest, + dict, + ], +) +def test_create_persistent_resource_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["persistent_resource"] = { + "name": "name_value", + "display_name": "display_name_value", + "resource_pools": [ + { + "id": "id_value", + "machine_spec": { + "machine_type": "machine_type_value", + "accelerator_type": 1, + "accelerator_count": 1805, + "tpu_topology": "tpu_topology_value", + }, + "replica_count": 1384, + "disk_spec": { + "boot_disk_type": "boot_disk_type_value", + "boot_disk_size_gb": 1792, + }, + "used_replica_count": 1912, + "autoscaling_spec": { + 
"min_replica_count": 1803, + "max_replica_count": 1805, + }, + } + ], + "state": 1, + "error": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "create_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "update_time": {}, + "labels": {}, + "network": "network_value", + "encryption_spec": {"kms_key_name": "kms_key_name_value"}, + "resource_runtime_spec": { + "service_account_spec": { + "enable_custom_service_account": True, + "service_account": "service_account_value", + }, + "ray_spec": {}, + }, + "resource_runtime": {}, + "reserved_ip_ranges": [ + "reserved_ip_ranges_value1", + "reserved_ip_ranges_value2", + ], + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = ( + persistent_resource_service.CreatePersistentResourceRequest.meta.fields[ + "persistent_resource" + ] + ) + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["persistent_resource"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["persistent_resource"][field])): + del 
request_init["persistent_resource"][field][i][subfield] + else: + del request_init["persistent_resource"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_persistent_resource(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_create_persistent_resource_rest_required_fields( + request_type=persistent_resource_service.CreatePersistentResourceRequest, +): + transport_class = transports.PersistentResourceServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["persistent_resource_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + assert "persistentResourceId" not in jsonified_request + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_persistent_resource._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "persistentResourceId" in jsonified_request + assert ( + jsonified_request["persistentResourceId"] + == request_init["persistent_resource_id"] + ) + + jsonified_request["parent"] = "parent_value" + 
jsonified_request["persistentResourceId"] = "persistent_resource_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_persistent_resource._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("persistent_resource_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "persistentResourceId" in jsonified_request + assert jsonified_request["persistentResourceId"] == "persistent_resource_id_value" + + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_persistent_resource(request) + + expected_params = [ + ( + "persistentResourceId", + "", + ), + ] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_persistent_resource_rest_unset_required_fields(): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_persistent_resource._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("persistentResourceId",)) + & set( + ( + "parent", + "persistentResource", + "persistentResourceId", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_persistent_resource_rest_interceptors(null_interceptor): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PersistentResourceServiceRestInterceptor(), + ) + client = PersistentResourceServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "post_create_persistent_resource", + ) as post, mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "pre_create_persistent_resource", + ) as 
pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = persistent_resource_service.CreatePersistentResourceRequest.pb( + persistent_resource_service.CreatePersistentResourceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = persistent_resource_service.CreatePersistentResourceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_persistent_resource( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_persistent_resource_rest_bad_request( + transport: str = "rest", + request_type=persistent_resource_service.CreatePersistentResourceRequest, +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_persistent_resource(request) + + +def test_create_persistent_resource_rest_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + persistent_resource=gca_persistent_resource.PersistentResource( + name="name_value" + ), + persistent_resource_id="persistent_resource_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_persistent_resource(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/persistentResources" + % client.transport._host, + args[1], + ) + + +def test_create_persistent_resource_rest_flattened_error(transport: str = "rest"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_persistent_resource( + persistent_resource_service.CreatePersistentResourceRequest(), + parent="parent_value", + persistent_resource=gca_persistent_resource.PersistentResource( + name="name_value" + ), + persistent_resource_id="persistent_resource_id_value", + ) + + +def test_create_persistent_resource_rest_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.GetPersistentResourceRequest, + dict, + ], +) +def test_get_persistent_resource_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = persistent_resource.PersistentResource( + name="name_value", + display_name="display_name_value", + state=persistent_resource.PersistentResource.State.PROVISIONING, + network="network_value", + reserved_ip_ranges=["reserved_ip_ranges_value"], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = persistent_resource.PersistentResource.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_persistent_resource(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, persistent_resource.PersistentResource) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.state == persistent_resource.PersistentResource.State.PROVISIONING + assert response.network == "network_value" + assert response.reserved_ip_ranges == ["reserved_ip_ranges_value"] + + +def test_get_persistent_resource_rest_required_fields( + request_type=persistent_resource_service.GetPersistentResourceRequest, +): + transport_class = transports.PersistentResourceServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_persistent_resource._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + 
credentials=ga_credentials.AnonymousCredentials() + ).get_persistent_resource._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = persistent_resource.PersistentResource() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = persistent_resource.PersistentResource.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_persistent_resource(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_persistent_resource_rest_unset_required_fields(): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_persistent_resource._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_persistent_resource_rest_interceptors(null_interceptor): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PersistentResourceServiceRestInterceptor(), + ) + client = PersistentResourceServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "post_get_persistent_resource", + ) as post, mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "pre_get_persistent_resource", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = persistent_resource_service.GetPersistentResourceRequest.pb( + 
persistent_resource_service.GetPersistentResourceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = persistent_resource.PersistentResource.to_json( + persistent_resource.PersistentResource() + ) + + request = persistent_resource_service.GetPersistentResourceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = persistent_resource.PersistentResource() + + client.get_persistent_resource( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_persistent_resource_rest_bad_request( + transport: str = "rest", + request_type=persistent_resource_service.GetPersistentResourceRequest, +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_persistent_resource(request) + + +def test_get_persistent_resource_rest_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = persistent_resource.PersistentResource() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = persistent_resource.PersistentResource.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_persistent_resource(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/persistentResources/*}" + % client.transport._host, + args[1], + ) + + +def test_get_persistent_resource_rest_flattened_error(transport: str = "rest"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_persistent_resource( + persistent_resource_service.GetPersistentResourceRequest(), + name="name_value", + ) + + +def test_get_persistent_resource_rest_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.ListPersistentResourcesRequest, + dict, + ], +) +def test_list_persistent_resources_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = persistent_resource_service.ListPersistentResourcesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = persistent_resource_service.ListPersistentResourcesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_persistent_resources(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPersistentResourcesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_persistent_resources_rest_required_fields( + request_type=persistent_resource_service.ListPersistentResourcesRequest, +): + transport_class = transports.PersistentResourceServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_persistent_resources._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_persistent_resources._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = persistent_resource_service.ListPersistentResourcesResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = ( + persistent_resource_service.ListPersistentResourcesResponse.pb( + return_value + ) + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_persistent_resources(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_persistent_resources_rest_unset_required_fields(): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_persistent_resources._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_persistent_resources_rest_interceptors(null_interceptor): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PersistentResourceServiceRestInterceptor(), + ) + client = PersistentResourceServiceClient(transport=transport) + with mock.patch.object( + 
type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "post_list_persistent_resources", + ) as post, mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "pre_list_persistent_resources", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = persistent_resource_service.ListPersistentResourcesRequest.pb( + persistent_resource_service.ListPersistentResourcesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + persistent_resource_service.ListPersistentResourcesResponse.to_json( + persistent_resource_service.ListPersistentResourcesResponse() + ) + ) + + request = persistent_resource_service.ListPersistentResourcesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = ( + persistent_resource_service.ListPersistentResourcesResponse() + ) + + client.list_persistent_resources( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_persistent_resources_rest_bad_request( + transport: str = "rest", + request_type=persistent_resource_service.ListPersistentResourcesRequest, +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_persistent_resources(request) + + +def test_list_persistent_resources_rest_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = persistent_resource_service.ListPersistentResourcesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = persistent_resource_service.ListPersistentResourcesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_persistent_resources(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{parent=projects/*/locations/*}/persistentResources" + % client.transport._host, + args[1], + ) + + +def test_list_persistent_resources_rest_flattened_error(transport: str = "rest"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_persistent_resources( + persistent_resource_service.ListPersistentResourcesRequest(), + parent="parent_value", + ) + + +def test_list_persistent_resources_rest_pager(transport: str = "rest"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + ], + next_page_token="abc", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[], + next_page_token="def", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + ], + next_page_token="ghi", + ), + persistent_resource_service.ListPersistentResourcesResponse( + persistent_resources=[ + persistent_resource.PersistentResource(), + persistent_resource.PersistentResource(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + persistent_resource_service.ListPersistentResourcesResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_persistent_resources(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, persistent_resource.PersistentResource) for i in results + ) + + pages = list(client.list_persistent_resources(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.DeletePersistentResourceRequest, + dict, + ], +) +def test_delete_persistent_resource_rest(request_type): + client = 
PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_persistent_resource(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_delete_persistent_resource_rest_required_fields( + request_type=persistent_resource_service.DeletePersistentResourceRequest, +): + transport_class = transports.PersistentResourceServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_persistent_resource._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + 
).delete_persistent_resource._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_persistent_resource(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_persistent_resource_rest_unset_required_fields(): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_persistent_resource._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_persistent_resource_rest_interceptors(null_interceptor): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PersistentResourceServiceRestInterceptor(), + ) + client = PersistentResourceServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "post_delete_persistent_resource", + ) as post, mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "pre_delete_persistent_resource", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = persistent_resource_service.DeletePersistentResourceRequest.pb( + 
persistent_resource_service.DeletePersistentResourceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = persistent_resource_service.DeletePersistentResourceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_persistent_resource( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_persistent_resource_rest_bad_request( + transport: str = "rest", + request_type=persistent_resource_service.DeletePersistentResourceRequest, +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_persistent_resource(request) + + +def test_delete_persistent_resource_rest_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_persistent_resource(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/persistentResources/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_persistent_resource_rest_flattened_error(transport: str = "rest"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_persistent_resource( + persistent_resource_service.DeletePersistentResourceRequest(), + name="name_value", + ) + + +def test_delete_persistent_resource_rest_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.UpdatePersistentResourceRequest, + dict, + ], +) +def test_update_persistent_resource_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "persistent_resource": { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + } + request_init["persistent_resource"] = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3", + "display_name": "display_name_value", + "resource_pools": [ + { + "id": "id_value", + "machine_spec": { + "machine_type": "machine_type_value", + "accelerator_type": 1, + "accelerator_count": 1805, + "tpu_topology": "tpu_topology_value", + }, + "replica_count": 1384, + "disk_spec": { + "boot_disk_type": "boot_disk_type_value", + "boot_disk_size_gb": 1792, + }, + "used_replica_count": 1912, + "autoscaling_spec": { + "min_replica_count": 1803, + "max_replica_count": 1805, + }, + } + ], + "state": 1, + "error": { + "code": 411, + "message": "message_value", + "details": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + }, + "create_time": {"seconds": 751, "nanos": 543}, + "start_time": {}, + "update_time": {}, + "labels": {}, + "network": "network_value", + "encryption_spec": {"kms_key_name": "kms_key_name_value"}, + "resource_runtime_spec": { + "service_account_spec": { + "enable_custom_service_account": True, + "service_account": "service_account_value", + }, + "ray_spec": {}, 
+ }, + "resource_runtime": {}, + "reserved_ip_ranges": [ + "reserved_ip_ranges_value1", + "reserved_ip_ranges_value2", + ], + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = ( + persistent_resource_service.UpdatePersistentResourceRequest.meta.fields[ + "persistent_resource" + ] + ) + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["persistent_resource"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in 
result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["persistent_resource"][field])): + del request_init["persistent_resource"][field][i][subfield] + else: + del request_init["persistent_resource"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_persistent_resource(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_update_persistent_resource_rest_required_fields( + request_type=persistent_resource_service.UpdatePersistentResourceRequest, +): + transport_class = transports.PersistentResourceServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_persistent_resource._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_persistent_resource._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_persistent_resource(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_persistent_resource_rest_unset_required_fields(): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_persistent_resource._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "persistentResource", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_persistent_resource_rest_interceptors(null_interceptor): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PersistentResourceServiceRestInterceptor(), + ) + client = PersistentResourceServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "post_update_persistent_resource", + ) as post, mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "pre_update_persistent_resource", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = 
persistent_resource_service.UpdatePersistentResourceRequest.pb( + persistent_resource_service.UpdatePersistentResourceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = persistent_resource_service.UpdatePersistentResourceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.update_persistent_resource( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_persistent_resource_rest_bad_request( + transport: str = "rest", + request_type=persistent_resource_service.UpdatePersistentResourceRequest, +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "persistent_resource": { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_persistent_resource(request) + + +def test_update_persistent_resource_rest_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "persistent_resource": { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + } + + # get truthy value for each flattened field + mock_args = dict( + persistent_resource=gca_persistent_resource.PersistentResource( + name="name_value" + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_persistent_resource(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{persistent_resource.name=projects/*/locations/*/persistentResources/*}" + % client.transport._host, + args[1], + ) + + +def test_update_persistent_resource_rest_flattened_error(transport: str = "rest"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_persistent_resource( + persistent_resource_service.UpdatePersistentResourceRequest(), + persistent_resource=gca_persistent_resource.PersistentResource( + name="name_value" + ), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_update_persistent_resource_rest_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.RebootPersistentResourceRequest, + dict, + ], +) +def test_reboot_persistent_resource_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.reboot_persistent_resource(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_reboot_persistent_resource_rest_required_fields( + request_type=persistent_resource_service.RebootPersistentResourceRequest, +): + transport_class = transports.PersistentResourceServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).reboot_persistent_resource._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).reboot_persistent_resource._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.reboot_persistent_resource(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_reboot_persistent_resource_rest_unset_required_fields(): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.reboot_persistent_resource._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_reboot_persistent_resource_rest_interceptors(null_interceptor): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PersistentResourceServiceRestInterceptor(), + ) + client = PersistentResourceServiceClient(transport=transport) + with mock.patch.object( + 
type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "post_reboot_persistent_resource", + ) as post, mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "pre_reboot_persistent_resource", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = persistent_resource_service.RebootPersistentResourceRequest.pb( + persistent_resource_service.RebootPersistentResourceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = persistent_resource_service.RebootPersistentResourceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.reboot_persistent_resource( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_reboot_persistent_resource_rest_bad_request( + transport: str = "rest", + request_type=persistent_resource_service.RebootPersistentResourceRequest, +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.reboot_persistent_resource(request) + + +def test_reboot_persistent_resource_rest_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.reboot_persistent_resource(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{name=projects/*/locations/*/persistentResources/*}:reboot" + % client.transport._host, + args[1], + ) + + +def test_reboot_persistent_resource_rest_flattened_error(transport: str = "rest"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.reboot_persistent_resource( + persistent_resource_service.RebootPersistentResourceRequest(), + name="name_value", + ) + + +def test_reboot_persistent_resource_rest_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PersistentResourceServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PersistentResourceServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PersistentResourceServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.PersistentResourceServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PersistentResourceServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = PersistentResourceServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.PersistentResourceServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PersistentResourceServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PersistentResourceServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PersistentResourceServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.PersistentResourceServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.PersistentResourceServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PersistentResourceServiceGrpcTransport, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + transports.PersistentResourceServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = PersistentResourceServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.PersistentResourceServiceGrpcTransport, + ) + + +def test_persistent_resource_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PersistentResourceServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_persistent_resource_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.aiplatform_v1.services.persistent_resource_service.transports.PersistentResourceServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.PersistentResourceServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_persistent_resource", + "get_persistent_resource", + "list_persistent_resources", + "delete_persistent_resource", + "update_persistent_resource", + "reboot_persistent_resource", + "set_iam_policy", + "get_iam_policy", + "test_iam_permissions", + "get_location", + "list_locations", + "get_operation", + "wait_operation", + "cancel_operation", + "delete_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_persistent_resource_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1.services.persistent_resource_service.transports.PersistentResourceServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PersistentResourceServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + 
load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_persistent_resource_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1.services.persistent_resource_service.transports.PersistentResourceServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PersistentResourceServiceTransport() + adc.assert_called_once() + + +def test_persistent_resource_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PersistentResourceServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PersistentResourceServiceGrpcTransport, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + ], +) +def test_persistent_resource_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PersistentResourceServiceGrpcTransport, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + transports.PersistentResourceServiceRestTransport, + ], +) +def test_persistent_resource_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.PersistentResourceServiceGrpcTransport, grpc_helpers), + (transports.PersistentResourceServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_persistent_resource_service_transport_create_channel( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.PersistentResourceServiceGrpcTransport, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + ], +) +def test_persistent_resource_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_persistent_resource_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.PersistentResourceServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_persistent_resource_service_rest_lro_client(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_persistent_resource_service_host_no_port(transport_name): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_persistent_resource_service_host_with_port(transport_name): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_persistent_resource_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = PersistentResourceServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = PersistentResourceServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_persistent_resource._session + session2 = client2.transport.create_persistent_resource._session + assert session1 != session2 + session1 = client1.transport.get_persistent_resource._session + session2 = client2.transport.get_persistent_resource._session + assert session1 != session2 + session1 = 
client1.transport.list_persistent_resources._session + session2 = client2.transport.list_persistent_resources._session + assert session1 != session2 + session1 = client1.transport.delete_persistent_resource._session + session2 = client2.transport.delete_persistent_resource._session + assert session1 != session2 + session1 = client1.transport.update_persistent_resource._session + session2 = client2.transport.update_persistent_resource._session + assert session1 != session2 + session1 = client1.transport.reboot_persistent_resource._session + session2 = client2.transport.reboot_persistent_resource._session + assert session1 != session2 + + +def test_persistent_resource_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.PersistentResourceServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_persistent_resource_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.PersistentResourceServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.PersistentResourceServiceGrpcTransport, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + ], +) +def test_persistent_resource_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.PersistentResourceServiceGrpcTransport, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + ], +) +def test_persistent_resource_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_persistent_resource_service_grpc_lro_client(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_persistent_resource_service_grpc_lro_async_client(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_network_path(): + project = "squid" + network = "clam" + expected = "projects/{project}/global/networks/{network}".format( + project=project, + network=network, + ) + actual = PersistentResourceServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "whelk", + "network": "octopus", + } + path = PersistentResourceServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = PersistentResourceServiceClient.parse_network_path(path) + assert expected == actual + + +def test_persistent_resource_path(): + project = "oyster" + location = "nudibranch" + persistent_resource = "cuttlefish" + expected = "projects/{project}/locations/{location}/persistentResources/{persistent_resource}".format( + project=project, + location=location, + persistent_resource=persistent_resource, + ) + actual = PersistentResourceServiceClient.persistent_resource_path( + project, location, persistent_resource + ) + assert expected == actual + + +def test_parse_persistent_resource_path(): + expected = { + "project": "mussel", + "location": "winkle", + "persistent_resource": "nautilus", + } + path = PersistentResourceServiceClient.persistent_resource_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PersistentResourceServiceClient.parse_persistent_resource_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "scallop" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = PersistentResourceServiceClient.common_billing_account_path( + billing_account + ) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = PersistentResourceServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = PersistentResourceServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "squid" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = PersistentResourceServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "clam", + } + path = PersistentResourceServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PersistentResourceServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "whelk" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = PersistentResourceServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "octopus", + } + path = PersistentResourceServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PersistentResourceServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "oyster" + expected = "projects/{project}".format( + project=project, + ) + actual = PersistentResourceServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = PersistentResourceServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = PersistentResourceServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = PersistentResourceServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = PersistentResourceServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PersistentResourceServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.PersistentResourceServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.PersistentResourceServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = PersistentResourceServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_get_location_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.GetLocationRequest +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_location(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.GetLocationRequest, + dict, + ], +) +def test_get_location_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.Location() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_location(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_list_locations_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.ListLocationsRequest +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({"name": "projects/sample1"}, request) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_locations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.ListLocationsRequest, + dict, + ], +) +def test_list_locations_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_locations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_get_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_set_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_test_iam_permissions_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + +def test_cancel_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.CancelOperationRequest +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.DeleteOperationRequest +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.DeleteOperationRequest, + dict, + ], +) +def test_delete_operation_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.ListOperationsRequest +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_wait_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.WaitOperationRequest +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.WaitOperationRequest, + dict, + ], +) +def test_wait_operation_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.wait_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_delete_operation(transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_delete_operation_from_dict(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_wait_operation(transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation()
+ response = client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+ client = PersistentResourceServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = operations_pb2.WaitOperationRequest()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation()
+ )
+ response = await client.wait_operation(request)
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+ client = PersistentResourceServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_wait_operation_from_dict(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_list_locations_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_locations_from_dict(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_get_location_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations/abc", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations/abc", + ) in kw["metadata"] + + +def test_get_location_from_dict(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.Location() + + response = client.get_location( + request={ + "name": "locations/abc", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_location_from_dict_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_set_iam_policy_from_dict(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +def test_get_iam_policy(transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_test_iam_permissions(transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + ( + PersistentResourceServiceClient, + transports.PersistentResourceServiceGrpcTransport, + ), + ( + PersistentResourceServiceAsyncClient, + transports.PersistentResourceServiceGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_evaluation_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_evaluation_service.py new file mode 100644 index 0000000000..30f0d77cd6 --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1beta1/test_evaluation_service.py @@ -0,0 +1,4428 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from google.api_core import api_core_version +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.evaluation_service import ( + EvaluationServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.evaluation_service import ( + EvaluationServiceClient, +) +from google.cloud.aiplatform_v1beta1.services.evaluation_service import transports +from google.cloud.aiplatform_v1beta1.types import evaluation_service +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 
import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert EvaluationServiceClient._get_default_mtls_endpoint(None) is None + assert ( + EvaluationServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + EvaluationServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + EvaluationServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + EvaluationServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + EvaluationServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + 
+def test__read_environment_variables(): + assert EvaluationServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert EvaluationServiceClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert EvaluationServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + EvaluationServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert EvaluationServiceClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert EvaluationServiceClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert EvaluationServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + EvaluationServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert EvaluationServiceClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + 
mock_default_cert_source = mock.Mock() + + assert EvaluationServiceClient._get_client_cert_source(None, False) is None + assert ( + EvaluationServiceClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + EvaluationServiceClient._get_client_cert_source(mock_provided_cert_source, True) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + EvaluationServiceClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + EvaluationServiceClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + EvaluationServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(EvaluationServiceClient), +) +@mock.patch.object( + EvaluationServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(EvaluationServiceAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = EvaluationServiceClient._DEFAULT_UNIVERSE + default_endpoint = EvaluationServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = EvaluationServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + EvaluationServiceClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + EvaluationServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == EvaluationServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + EvaluationServiceClient._get_api_endpoint(None, None, default_universe, "auto") + == 
default_endpoint + ) + assert ( + EvaluationServiceClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == EvaluationServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + EvaluationServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == EvaluationServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + EvaluationServiceClient._get_api_endpoint(None, None, mock_universe, "never") + == mock_endpoint + ) + assert ( + EvaluationServiceClient._get_api_endpoint(None, None, default_universe, "never") + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + EvaluationServiceClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + EvaluationServiceClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + EvaluationServiceClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + EvaluationServiceClient._get_universe_domain(None, None) + == EvaluationServiceClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + EvaluationServiceClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." 
+ + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (EvaluationServiceClient, transports.EvaluationServiceGrpcTransport, "grpc"), + (EvaluationServiceClient, transports.EvaluationServiceRestTransport, "rest"), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. + google_auth_major, google_auth_minor = [ + int(part) for part in google.auth.__version__.split(".")[0:2] + ] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. 
+ client = client_class(transport=transport_class(credentials=credentials)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. + api_core_major, api_core_minor = [ + int(part) for part in api_core_version.__version__.split(".")[0:2] + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
+ ) + + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (EvaluationServiceClient, "grpc"), + (EvaluationServiceAsyncClient, "grpc_asyncio"), + (EvaluationServiceClient, "rest"), + ], +) +def test_evaluation_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.EvaluationServiceGrpcTransport, "grpc"), + (transports.EvaluationServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.EvaluationServiceRestTransport, "rest"), + ], +) +def test_evaluation_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + 
+@pytest.mark.parametrize( + "client_class,transport_name", + [ + (EvaluationServiceClient, "grpc"), + (EvaluationServiceAsyncClient, "grpc_asyncio"), + (EvaluationServiceClient, "rest"), + ], +) +def test_evaluation_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +def test_evaluation_service_client_get_transport_class(): + transport = EvaluationServiceClient.get_transport_class() + available_transports = [ + transports.EvaluationServiceGrpcTransport, + transports.EvaluationServiceRestTransport, + ] + assert transport in available_transports + + transport = EvaluationServiceClient.get_transport_class("grpc") + assert transport == transports.EvaluationServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (EvaluationServiceClient, transports.EvaluationServiceGrpcTransport, "grpc"), + ( + EvaluationServiceAsyncClient, + transports.EvaluationServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (EvaluationServiceClient, transports.EvaluationServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + EvaluationServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(EvaluationServiceClient), +) +@mock.patch.object( + 
EvaluationServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(EvaluationServiceAsyncClient), +) +def test_evaluation_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(EvaluationServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(EvaluationServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + EvaluationServiceClient, + transports.EvaluationServiceGrpcTransport, + "grpc", + "true", + ), + ( + EvaluationServiceAsyncClient, + transports.EvaluationServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), 
+ ( + EvaluationServiceClient, + transports.EvaluationServiceGrpcTransport, + "grpc", + "false", + ), + ( + EvaluationServiceAsyncClient, + transports.EvaluationServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + EvaluationServiceClient, + transports.EvaluationServiceRestTransport, + "rest", + "true", + ), + ( + EvaluationServiceClient, + transports.EvaluationServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + EvaluationServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(EvaluationServiceClient), +) +@mock.patch.object( + EvaluationServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(EvaluationServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_evaluation_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [EvaluationServiceClient, EvaluationServiceAsyncClient] +) +@mock.patch.object( + EvaluationServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(EvaluationServiceClient), +) +@mock.patch.object( + EvaluationServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(EvaluationServiceAsyncClient), +) +def test_evaluation_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", [EvaluationServiceClient, EvaluationServiceAsyncClient] +) +@mock.patch.object( + EvaluationServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(EvaluationServiceClient), +) +@mock.patch.object( + EvaluationServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(EvaluationServiceAsyncClient), +) +def test_evaluation_service_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = EvaluationServiceClient._DEFAULT_UNIVERSE + default_endpoint = EvaluationServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = EvaluationServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. + options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (EvaluationServiceClient, transports.EvaluationServiceGrpcTransport, "grpc"), + ( + EvaluationServiceAsyncClient, + transports.EvaluationServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (EvaluationServiceClient, transports.EvaluationServiceRestTransport, "rest"), + ], +) +def test_evaluation_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + EvaluationServiceClient, + transports.EvaluationServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + EvaluationServiceAsyncClient, + transports.EvaluationServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + EvaluationServiceClient, + transports.EvaluationServiceRestTransport, + "rest", + None, + ), + ], +) +def test_evaluation_service_client_client_options_credentials_file( + 
client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_evaluation_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.evaluation_service.transports.EvaluationServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = EvaluationServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + EvaluationServiceClient, + transports.EvaluationServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + EvaluationServiceAsyncClient, + transports.EvaluationServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_evaluation_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + evaluation_service.EvaluateInstancesRequest, + dict, + ], +) +def test_evaluate_instances(request_type, transport: str = "grpc"): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and 
we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.evaluate_instances), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = evaluation_service.EvaluateInstancesResponse() + response = client.evaluate_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = evaluation_service.EvaluateInstancesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, evaluation_service.EvaluateInstancesResponse) + + +def test_evaluate_instances_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.evaluate_instances), "__call__" + ) as call: + client.evaluate_instances() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == evaluation_service.EvaluateInstancesRequest() + + +def test_evaluate_instances_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = evaluation_service.EvaluateInstancesRequest( + location="location_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.evaluate_instances), "__call__" + ) as call: + client.evaluate_instances(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == evaluation_service.EvaluateInstancesRequest( + location="location_value", + ) + + +@pytest.mark.asyncio +async def test_evaluate_instances_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.evaluate_instances), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + evaluation_service.EvaluateInstancesResponse() + ) + response = await client.evaluate_instances() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == evaluation_service.EvaluateInstancesRequest() + + +@pytest.mark.asyncio +async def test_evaluate_instances_async( + transport: str = "grpc_asyncio", + request_type=evaluation_service.EvaluateInstancesRequest, +): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.evaluate_instances), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + evaluation_service.EvaluateInstancesResponse() + ) + response = await client.evaluate_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = evaluation_service.EvaluateInstancesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, evaluation_service.EvaluateInstancesResponse) + + +@pytest.mark.asyncio +async def test_evaluate_instances_async_from_dict(): + await test_evaluate_instances_async(request_type=dict) + + +def test_evaluate_instances_field_headers(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = evaluation_service.EvaluateInstancesRequest() + + request.location = "location_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.evaluate_instances), "__call__" + ) as call: + call.return_value = evaluation_service.EvaluateInstancesResponse() + client.evaluate_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "location=location_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_evaluate_instances_field_headers_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = evaluation_service.EvaluateInstancesRequest() + + request.location = "location_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.evaluate_instances), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + evaluation_service.EvaluateInstancesResponse() + ) + await client.evaluate_instances(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "location=location_value", + ) in kw["metadata"] + + +@pytest.mark.parametrize( + "request_type", + [ + evaluation_service.EvaluateInstancesRequest, + dict, + ], +) +def test_evaluate_instances_rest(request_type): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"location": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = evaluation_service.EvaluateInstancesResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = evaluation_service.EvaluateInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.evaluate_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, evaluation_service.EvaluateInstancesResponse) + + +def test_evaluate_instances_rest_required_fields( + request_type=evaluation_service.EvaluateInstancesRequest, +): + transport_class = transports.EvaluationServiceRestTransport + + request_init = {} + request_init["location"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).evaluate_instances._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["location"] = "location_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).evaluate_instances._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "location" in jsonified_request + assert jsonified_request["location"] == "location_value" + + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = evaluation_service.EvaluateInstancesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = evaluation_service.EvaluateInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.evaluate_instances(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_evaluate_instances_rest_unset_required_fields(): + transport = transports.EvaluationServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.evaluate_instances._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("location",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_evaluate_instances_rest_interceptors(null_interceptor): + transport = transports.EvaluationServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.EvaluationServiceRestInterceptor(), + ) + client = EvaluationServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.EvaluationServiceRestInterceptor, "post_evaluate_instances" + ) as post, mock.patch.object( + transports.EvaluationServiceRestInterceptor, "pre_evaluate_instances" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = evaluation_service.EvaluateInstancesRequest.pb( + evaluation_service.EvaluateInstancesRequest() + ) + 
transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + evaluation_service.EvaluateInstancesResponse.to_json( + evaluation_service.EvaluateInstancesResponse() + ) + ) + + request = evaluation_service.EvaluateInstancesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = evaluation_service.EvaluateInstancesResponse() + + client.evaluate_instances( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_evaluate_instances_rest_bad_request( + transport: str = "rest", request_type=evaluation_service.EvaluateInstancesRequest +): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"location": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.evaluate_instances(request) + + +def test_evaluate_instances_rest_error(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.EvaluationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.EvaluationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = EvaluationServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.EvaluationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = EvaluationServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = EvaluationServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.EvaluationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = EvaluationServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.EvaluationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = EvaluationServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.EvaluationServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.EvaluationServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.EvaluationServiceGrpcTransport, + transports.EvaluationServiceGrpcAsyncIOTransport, + transports.EvaluationServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = EvaluationServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.EvaluationServiceGrpcTransport, + ) + + +def test_evaluation_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.EvaluationServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_evaluation_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.evaluation_service.transports.EvaluationServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.EvaluationServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "evaluate_instances", + "set_iam_policy", + "get_iam_policy", + "test_iam_permissions", + "get_location", + "list_locations", + "get_operation", + "wait_operation", + "cancel_operation", + "delete_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_evaluation_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.evaluation_service.transports.EvaluationServiceTransport._prep_wrapped_messages" + ) as Transport: + 
Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.EvaluationServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_evaluation_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.evaluation_service.transports.EvaluationServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.EvaluationServiceTransport() + adc.assert_called_once() + + +def test_evaluation_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + EvaluationServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.EvaluationServiceGrpcTransport, + transports.EvaluationServiceGrpcAsyncIOTransport, + ], +) +def test_evaluation_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.EvaluationServiceGrpcTransport, + transports.EvaluationServiceGrpcAsyncIOTransport, + transports.EvaluationServiceRestTransport, + ], +) +def test_evaluation_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.EvaluationServiceGrpcTransport, grpc_helpers), + (transports.EvaluationServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_evaluation_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.EvaluationServiceGrpcTransport, + transports.EvaluationServiceGrpcAsyncIOTransport, + ], +) +def test_evaluation_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_evaluation_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.EvaluationServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_evaluation_service_host_no_port(transport_name): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_evaluation_service_host_with_port(transport_name): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com:8000" + ) + + 
+@pytest.mark.parametrize(
+    "transport_name",
+    [
+        "rest",
+    ],
+)
+def test_evaluation_service_client_transport_session_collision(transport_name):
+    creds1 = ga_credentials.AnonymousCredentials()
+    creds2 = ga_credentials.AnonymousCredentials()
+    client1 = EvaluationServiceClient(
+        credentials=creds1,
+        transport=transport_name,
+    )
+    client2 = EvaluationServiceClient(
+        credentials=creds2,
+        transport=transport_name,
+    )
+    session1 = client1.transport.evaluate_instances._session
+    session2 = client2.transport.evaluate_instances._session
+    assert session1 != session2
+
+
+def test_evaluation_service_grpc_transport_channel():
+    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.EvaluationServiceGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_evaluation_service_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.EvaluationServiceGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize( + "transport_class", + [ + transports.EvaluationServiceGrpcTransport, + transports.EvaluationServiceGrpcAsyncIOTransport, + ], +) +def test_evaluation_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.EvaluationServiceGrpcTransport, + transports.EvaluationServiceGrpcAsyncIOTransport, + ], +) +def test_evaluation_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = EvaluationServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = EvaluationServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = EvaluationServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = EvaluationServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = EvaluationServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = EvaluationServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = EvaluationServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = EvaluationServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = EvaluationServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format( + project=project, + ) + actual = EvaluationServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = EvaluationServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = EvaluationServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = EvaluationServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = EvaluationServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = EvaluationServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.EvaluationServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.EvaluationServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = EvaluationServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_get_location_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.GetLocationRequest +): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), 
+ transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_location(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.GetLocationRequest, + dict, + ], +) +def test_get_location_rest(request_type): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.Location() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_location(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) + + +def test_list_locations_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.ListLocationsRequest +): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({"name": "projects/sample1"}, request) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_locations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.ListLocationsRequest, + dict, + ], +) +def test_list_locations_rest(request_type): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_locations(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_get_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest(request_type): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + +def test_set_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest(request_type): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + +def test_test_iam_permissions_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest +): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest(request_type): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + +def test_cancel_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.CancelOperationRequest +): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.DeleteOperationRequest +): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.DeleteOperationRequest, + dict, + ], +) +def test_delete_operation_rest(request_type): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_operation(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.ListOperationsRequest +): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_wait_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.WaitOperationRequest +): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.WaitOperationRequest, + dict, + ], +) +def test_wait_operation_rest(request_type): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.wait_operation(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +def test_delete_operation(transport: str = "grpc"): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_operation_field_headers(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_delete_operation_from_dict(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        response = await client.cancel_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+    client = EvaluationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the response is the type that we expect.
+        assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = EvaluationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_wait_operation_field_headers(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_wait_operation_from_dict(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_list_locations_field_headers(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_locations_from_dict(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) + + +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_get_location_field_headers(): + client = EvaluationServiceClient(credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+        _, _, kw = call.mock_calls[0]
+        assert (
+            "x-goog-request-params",
+            "name=locations/abc",
+        ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = EvaluationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        # Establish that the field header was sent.
+        _, _, kw = call.mock_calls[0]
+        assert (
+            "x-goog-request-params",
+            "name=locations/abc",
+        ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+    client = EvaluationServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = EvaluationServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_field_headers(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_set_iam_policy_from_dict(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +def test_get_iam_policy(transport: str = "grpc"): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_test_iam_permissions(transport: str = "grpc"): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = EvaluationServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = EvaluationServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (EvaluationServiceClient, transports.EvaluationServiceGrpcTransport), + ( + EvaluationServiceAsyncClient, + transports.EvaluationServiceGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + 
UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_extension_execution_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_extension_execution_service.py new file mode 100644 index 0000000000..68a77d8473 --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1beta1/test_extension_execution_service.py @@ -0,0 +1,5323 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from google.api_core import api_core_version +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.extension_execution_service import ( + ExtensionExecutionServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.extension_execution_service import ( + ExtensionExecutionServiceClient, +) +from google.cloud.aiplatform_v1beta1.services.extension_execution_service import ( + transports, +) +from google.cloud.aiplatform_v1beta1.types import content +from google.cloud.aiplatform_v1beta1.types import extension +from google.cloud.aiplatform_v1beta1.types import extension_execution_service +from google.cloud.aiplatform_v1beta1.types import tool +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import 
service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ExtensionExecutionServiceClient._get_default_mtls_endpoint(None) is None + assert ( + ExtensionExecutionServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + ExtensionExecutionServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + ExtensionExecutionServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + ExtensionExecutionServiceClient._get_default_mtls_endpoint( + sandbox_mtls_endpoint + ) + == sandbox_mtls_endpoint + ) + assert ( + ExtensionExecutionServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +def 
test__read_environment_variables(): + assert ExtensionExecutionServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert ExtensionExecutionServiceClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert ExtensionExecutionServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + ExtensionExecutionServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert ExtensionExecutionServiceClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert ExtensionExecutionServiceClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert ExtensionExecutionServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + ExtensionExecutionServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert ExtensionExecutionServiceClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def 
test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert ExtensionExecutionServiceClient._get_client_cert_source(None, False) is None + assert ( + ExtensionExecutionServiceClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + ExtensionExecutionServiceClient._get_client_cert_source( + mock_provided_cert_source, True + ) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + ExtensionExecutionServiceClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + ExtensionExecutionServiceClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + ExtensionExecutionServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionExecutionServiceClient), +) +@mock.patch.object( + ExtensionExecutionServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionExecutionServiceAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = ExtensionExecutionServiceClient._DEFAULT_UNIVERSE + default_endpoint = ( + ExtensionExecutionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + ) + mock_universe = "bar.com" + mock_endpoint = ExtensionExecutionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + ExtensionExecutionServiceClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + ExtensionExecutionServiceClient._get_api_endpoint( + None, 
mock_client_cert_source, default_universe, "auto" + ) + == ExtensionExecutionServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + ExtensionExecutionServiceClient._get_api_endpoint( + None, None, default_universe, "auto" + ) + == default_endpoint + ) + assert ( + ExtensionExecutionServiceClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == ExtensionExecutionServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + ExtensionExecutionServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == ExtensionExecutionServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + ExtensionExecutionServiceClient._get_api_endpoint( + None, None, mock_universe, "never" + ) + == mock_endpoint + ) + assert ( + ExtensionExecutionServiceClient._get_api_endpoint( + None, None, default_universe, "never" + ) + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + ExtensionExecutionServiceClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + ExtensionExecutionServiceClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + ExtensionExecutionServiceClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + ExtensionExecutionServiceClient._get_universe_domain(None, None) + == ExtensionExecutionServiceClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + ExtensionExecutionServiceClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." 
+ + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceGrpcTransport, + "grpc", + ), + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceRestTransport, + "rest", + ), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. 
+ google_auth_major, google_auth_minor = [ + int(part) for part in google.auth.__version__.split(".")[0:2] + ] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. + client = client_class(transport=transport_class(credentials=credentials)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. + api_core_major, api_core_minor = [ + int(part) for part in api_core_version.__version__.split(".")[0:2] + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
+ ) + + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (ExtensionExecutionServiceClient, "grpc"), + (ExtensionExecutionServiceAsyncClient, "grpc_asyncio"), + (ExtensionExecutionServiceClient, "rest"), + ], +) +def test_extension_execution_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.ExtensionExecutionServiceGrpcTransport, "grpc"), + (transports.ExtensionExecutionServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.ExtensionExecutionServiceRestTransport, "rest"), + ], +) +def test_extension_execution_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, 
always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (ExtensionExecutionServiceClient, "grpc"), + (ExtensionExecutionServiceAsyncClient, "grpc_asyncio"), + (ExtensionExecutionServiceClient, "rest"), + ], +) +def test_extension_execution_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +def test_extension_execution_service_client_get_transport_class(): + transport = ExtensionExecutionServiceClient.get_transport_class() + available_transports = [ + transports.ExtensionExecutionServiceGrpcTransport, + transports.ExtensionExecutionServiceRestTransport, + ] + assert transport in available_transports + + transport = ExtensionExecutionServiceClient.get_transport_class("grpc") + assert transport == transports.ExtensionExecutionServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceGrpcTransport, + "grpc", + ), + ( + ExtensionExecutionServiceAsyncClient, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + ExtensionExecutionServiceClient, + 
transports.ExtensionExecutionServiceRestTransport, + "rest", + ), + ], +) +@mock.patch.object( + ExtensionExecutionServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionExecutionServiceClient), +) +@mock.patch.object( + ExtensionExecutionServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionExecutionServiceAsyncClient), +) +def test_extension_execution_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object( + ExtensionExecutionServiceClient, "get_transport_class" + ) as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object( + ExtensionExecutionServiceClient, "get_transport_class" + ) as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceGrpcTransport, + "grpc", + "true", + ), + ( + ExtensionExecutionServiceAsyncClient, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, 
+ "grpc_asyncio", + "true", + ), + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceGrpcTransport, + "grpc", + "false", + ), + ( + ExtensionExecutionServiceAsyncClient, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceRestTransport, + "rest", + "true", + ), + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + ExtensionExecutionServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionExecutionServiceClient), +) +@mock.patch.object( + ExtensionExecutionServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionExecutionServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_extension_execution_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", + [ExtensionExecutionServiceClient, ExtensionExecutionServiceAsyncClient], +) +@mock.patch.object( + ExtensionExecutionServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ExtensionExecutionServiceClient), +) +@mock.patch.object( + ExtensionExecutionServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ExtensionExecutionServiceAsyncClient), +) +def test_extension_execution_service_client_get_mtls_endpoint_and_cert_source( + client_class, +): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", + [ExtensionExecutionServiceClient, ExtensionExecutionServiceAsyncClient], +) +@mock.patch.object( + ExtensionExecutionServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionExecutionServiceClient), +) +@mock.patch.object( + ExtensionExecutionServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionExecutionServiceAsyncClient), +) +def test_extension_execution_service_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = ExtensionExecutionServiceClient._DEFAULT_UNIVERSE + default_endpoint = ( + ExtensionExecutionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + ) + mock_universe = "bar.com" + mock_endpoint = ExtensionExecutionServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceGrpcTransport, + "grpc", + ), + ( + ExtensionExecutionServiceAsyncClient, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceRestTransport, + "rest", + ), + ], +) +def test_extension_execution_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + ExtensionExecutionServiceAsyncClient, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceRestTransport, + "rest", + None, + ), + ], +) +def test_extension_execution_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_extension_execution_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.extension_execution_service.transports.ExtensionExecutionServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = ExtensionExecutionServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + ExtensionExecutionServiceAsyncClient, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_extension_execution_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + extension_execution_service.ExecuteExtensionRequest, + dict, + ], +) +def test_execute_extension(request_type, transport: str = "grpc"): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is 
concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_extension), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = extension_execution_service.ExecuteExtensionResponse( + content="content_value", + ) + response = client.execute_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = extension_execution_service.ExecuteExtensionRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, extension_execution_service.ExecuteExtensionResponse) + assert response.content == "content_value" + + +def test_execute_extension_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_extension), "__call__" + ) as call: + client.execute_extension() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_execution_service.ExecuteExtensionRequest() + + +def test_execute_extension_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = extension_execution_service.ExecuteExtensionRequest( + name="name_value", + operation_id="operation_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_extension), "__call__" + ) as call: + client.execute_extension(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_execution_service.ExecuteExtensionRequest( + name="name_value", + operation_id="operation_id_value", + ) + + +@pytest.mark.asyncio +async def test_execute_extension_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_extension), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension_execution_service.ExecuteExtensionResponse( + content="content_value", + ) + ) + response = await client.execute_extension() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_execution_service.ExecuteExtensionRequest() + + +@pytest.mark.asyncio +async def test_execute_extension_async( + transport: str = "grpc_asyncio", + request_type=extension_execution_service.ExecuteExtensionRequest, +): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_extension), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension_execution_service.ExecuteExtensionResponse( + content="content_value", + ) + ) + response = await client.execute_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = extension_execution_service.ExecuteExtensionRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, extension_execution_service.ExecuteExtensionResponse) + assert response.content == "content_value" + + +@pytest.mark.asyncio +async def test_execute_extension_async_from_dict(): + await test_execute_extension_async(request_type=dict) + + +def test_execute_extension_field_headers(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = extension_execution_service.ExecuteExtensionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_extension), "__call__" + ) as call: + call.return_value = extension_execution_service.ExecuteExtensionResponse() + client.execute_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_execute_extension_field_headers_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = extension_execution_service.ExecuteExtensionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.execute_extension), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension_execution_service.ExecuteExtensionResponse() + ) + await client.execute_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_execute_extension_flattened(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_extension), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = extension_execution_service.ExecuteExtensionResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.execute_extension( + name="name_value", + operation_id="operation_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].operation_id + mock_val = "operation_id_value" + assert arg == mock_val + + +def test_execute_extension_flattened_error(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.execute_extension( + extension_execution_service.ExecuteExtensionRequest(), + name="name_value", + operation_id="operation_id_value", + ) + + +@pytest.mark.asyncio +async def test_execute_extension_flattened_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.execute_extension), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = extension_execution_service.ExecuteExtensionResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension_execution_service.ExecuteExtensionResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.execute_extension( + name="name_value", + operation_id="operation_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].operation_id + mock_val = "operation_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_execute_extension_flattened_error_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.execute_extension( + extension_execution_service.ExecuteExtensionRequest(), + name="name_value", + operation_id="operation_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + extension_execution_service.QueryExtensionRequest, + dict, + ], +) +def test_query_extension(request_type, transport: str = "grpc"): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = extension_execution_service.QueryExtensionResponse( + failure_message="failure_message_value", + ) + response = client.query_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = extension_execution_service.QueryExtensionRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, extension_execution_service.QueryExtensionResponse) + assert response.failure_message == "failure_message_value" + + +def test_query_extension_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.query_extension), "__call__") as call: + client.query_extension() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_execution_service.QueryExtensionRequest() + + +def test_query_extension_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = extension_execution_service.QueryExtensionRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_extension), "__call__") as call: + client.query_extension(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_execution_service.QueryExtensionRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_query_extension_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_extension), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension_execution_service.QueryExtensionResponse( + failure_message="failure_message_value", + ) + ) + response = await client.query_extension() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_execution_service.QueryExtensionRequest() + + +@pytest.mark.asyncio +async def test_query_extension_async( + transport: str = "grpc_asyncio", + request_type=extension_execution_service.QueryExtensionRequest, +): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension_execution_service.QueryExtensionResponse( + failure_message="failure_message_value", + ) + ) + response = await client.query_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = extension_execution_service.QueryExtensionRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, extension_execution_service.QueryExtensionResponse) + assert response.failure_message == "failure_message_value" + + +@pytest.mark.asyncio +async def test_query_extension_async_from_dict(): + await test_query_extension_async(request_type=dict) + + +def test_query_extension_field_headers(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = extension_execution_service.QueryExtensionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_extension), "__call__") as call: + call.return_value = extension_execution_service.QueryExtensionResponse() + client.query_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_query_extension_field_headers_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = extension_execution_service.QueryExtensionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.query_extension), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension_execution_service.QueryExtensionResponse() + ) + await client.query_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_query_extension_flattened(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = extension_execution_service.QueryExtensionResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.query_extension( + name="name_value", + contents=[content.Content(role="role_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(role="role_value")] + assert arg == mock_val + + +def test_query_extension_flattened_error(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.query_extension( + extension_execution_service.QueryExtensionRequest(), + name="name_value", + contents=[content.Content(role="role_value")], + ) + + +@pytest.mark.asyncio +async def test_query_extension_flattened_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.query_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = extension_execution_service.QueryExtensionResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension_execution_service.QueryExtensionResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.query_extension( + name="name_value", + contents=[content.Content(role="role_value")], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].contents + mock_val = [content.Content(role="role_value")] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_query_extension_flattened_error_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.query_extension( + extension_execution_service.QueryExtensionRequest(), + name="name_value", + contents=[content.Content(role="role_value")], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + extension_execution_service.ExecuteExtensionRequest, + dict, + ], +) +def test_execute_extension_rest(request_type): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/extensions/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = extension_execution_service.ExecuteExtensionResponse( + content="content_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = extension_execution_service.ExecuteExtensionResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.execute_extension(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, extension_execution_service.ExecuteExtensionResponse) + assert response.content == "content_value" + + +def test_execute_extension_rest_required_fields( + request_type=extension_execution_service.ExecuteExtensionRequest, +): + transport_class = transports.ExtensionExecutionServiceRestTransport + + request_init = {} + request_init["name"] = "" + request_init["operation_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).execute_extension._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + jsonified_request["operationId"] = "operation_id_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).execute_extension._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + assert "operationId" in jsonified_request + assert jsonified_request["operationId"] == "operation_id_value" + + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = extension_execution_service.ExecuteExtensionResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = extension_execution_service.ExecuteExtensionResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.execute_extension(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_execute_extension_rest_unset_required_fields(): + transport = transports.ExtensionExecutionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.execute_extension._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "operationId", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_execute_extension_rest_interceptors(null_interceptor): + transport = transports.ExtensionExecutionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ExtensionExecutionServiceRestInterceptor(), + ) + client = ExtensionExecutionServiceClient(transport=transport) + with mock.patch.object( + 
type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ExtensionExecutionServiceRestInterceptor, "post_execute_extension" + ) as post, mock.patch.object( + transports.ExtensionExecutionServiceRestInterceptor, "pre_execute_extension" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = extension_execution_service.ExecuteExtensionRequest.pb( + extension_execution_service.ExecuteExtensionRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + extension_execution_service.ExecuteExtensionResponse.to_json( + extension_execution_service.ExecuteExtensionResponse() + ) + ) + + request = extension_execution_service.ExecuteExtensionRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = extension_execution_service.ExecuteExtensionResponse() + + client.execute_extension( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_execute_extension_rest_bad_request( + transport: str = "rest", + request_type=extension_execution_service.ExecuteExtensionRequest, +): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/extensions/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.execute_extension(request) + + +def test_execute_extension_rest_flattened(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = extension_execution_service.ExecuteExtensionResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/extensions/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + operation_id="operation_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = extension_execution_service.ExecuteExtensionResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.execute_extension(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/extensions/*}:execute" + % client.transport._host, + args[1], + ) + + +def test_execute_extension_rest_flattened_error(transport: str = "rest"): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.execute_extension( + extension_execution_service.ExecuteExtensionRequest(), + name="name_value", + operation_id="operation_id_value", + ) + + +def test_execute_extension_rest_error(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + extension_execution_service.QueryExtensionRequest, + dict, + ], +) +def test_query_extension_rest(request_type): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/extensions/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = extension_execution_service.QueryExtensionResponse( + failure_message="failure_message_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = extension_execution_service.QueryExtensionResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.query_extension(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, extension_execution_service.QueryExtensionResponse) + assert response.failure_message == "failure_message_value" + + +def test_query_extension_rest_required_fields( + request_type=extension_execution_service.QueryExtensionRequest, +): + transport_class = transports.ExtensionExecutionServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).query_extension._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).query_extension._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ExtensionExecutionServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = extension_execution_service.QueryExtensionResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = extension_execution_service.QueryExtensionResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.query_extension(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_query_extension_rest_unset_required_fields(): + transport = transports.ExtensionExecutionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.query_extension._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "name", + "contents", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_query_extension_rest_interceptors(null_interceptor): + transport = 
transports.ExtensionExecutionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ExtensionExecutionServiceRestInterceptor(), + ) + client = ExtensionExecutionServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ExtensionExecutionServiceRestInterceptor, "post_query_extension" + ) as post, mock.patch.object( + transports.ExtensionExecutionServiceRestInterceptor, "pre_query_extension" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = extension_execution_service.QueryExtensionRequest.pb( + extension_execution_service.QueryExtensionRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + extension_execution_service.QueryExtensionResponse.to_json( + extension_execution_service.QueryExtensionResponse() + ) + ) + + request = extension_execution_service.QueryExtensionRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = extension_execution_service.QueryExtensionResponse() + + client.query_extension( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_query_extension_rest_bad_request( + transport: str = "rest", + request_type=extension_execution_service.QueryExtensionRequest, +): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": 
"projects/sample1/locations/sample2/extensions/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.query_extension(request) + + +def test_query_extension_rest_flattened(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = extension_execution_service.QueryExtensionResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/extensions/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + contents=[content.Content(role="role_value")], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = extension_execution_service.QueryExtensionResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.query_extension(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/extensions/*}:query" + % client.transport._host, + args[1], + ) + + +def test_query_extension_rest_flattened_error(transport: str = "rest"): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.query_extension( + extension_execution_service.QueryExtensionRequest(), + name="name_value", + contents=[content.Content(role="role_value")], + ) + + +def test_query_extension_rest_error(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ExtensionExecutionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ExtensionExecutionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ExtensionExecutionServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.ExtensionExecutionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ExtensionExecutionServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ExtensionExecutionServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ExtensionExecutionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ExtensionExecutionServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ExtensionExecutionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ExtensionExecutionServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.ExtensionExecutionServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ExtensionExecutionServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ExtensionExecutionServiceGrpcTransport, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, + transports.ExtensionExecutionServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = ExtensionExecutionServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ExtensionExecutionServiceGrpcTransport, + ) + + +def test_extension_execution_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ExtensionExecutionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_extension_execution_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.aiplatform_v1beta1.services.extension_execution_service.transports.ExtensionExecutionServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.ExtensionExecutionServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "execute_extension", + "query_extension", + "set_iam_policy", + "get_iam_policy", + "test_iam_permissions", + "get_location", + "list_locations", + "get_operation", + "wait_operation", + "cancel_operation", + "delete_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_extension_execution_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.extension_execution_service.transports.ExtensionExecutionServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ExtensionExecutionServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_extension_execution_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and 
credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.extension_execution_service.transports.ExtensionExecutionServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ExtensionExecutionServiceTransport() + adc.assert_called_once() + + +def test_extension_execution_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ExtensionExecutionServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ExtensionExecutionServiceGrpcTransport, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, + ], +) +def test_extension_execution_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ExtensionExecutionServiceGrpcTransport, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, + transports.ExtensionExecutionServiceRestTransport, + ], +) +def test_extension_execution_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ExtensionExecutionServiceGrpcTransport, grpc_helpers), + (transports.ExtensionExecutionServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_extension_execution_service_transport_create_channel( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ExtensionExecutionServiceGrpcTransport, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, + ], +) +def test_extension_execution_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_extension_execution_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.ExtensionExecutionServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_extension_execution_service_host_no_port(transport_name): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_extension_execution_service_host_with_port(transport_name): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else 
"https://aiplatform.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_extension_execution_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = ExtensionExecutionServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = ExtensionExecutionServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.execute_extension._session + session2 = client2.transport.execute_extension._session + assert session1 != session2 + session1 = client1.transport.query_extension._session + session2 = client2.transport.query_extension._session + assert session1 != session2 + + +def test_extension_execution_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ExtensionExecutionServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_extension_execution_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ExtensionExecutionServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.ExtensionExecutionServiceGrpcTransport, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, + ], +) +def test_extension_execution_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.ExtensionExecutionServiceGrpcTransport, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, + ], +) +def test_extension_execution_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_extension_path(): + project = "squid" + location = "clam" + extension = "whelk" + expected = "projects/{project}/locations/{location}/extensions/{extension}".format( + project=project, + location=location, + extension=extension, + ) + actual = ExtensionExecutionServiceClient.extension_path( + project, location, extension + ) + assert expected == actual + + +def test_parse_extension_path(): + expected = { + "project": "octopus", + "location": "oyster", + "extension": "nudibranch", + } + path = ExtensionExecutionServiceClient.extension_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ExtensionExecutionServiceClient.parse_extension_path(path) + assert expected == actual + + +def test_secret_version_path(): + project = "cuttlefish" + secret = "mussel" + secret_version = "winkle" + expected = "projects/{project}/secrets/{secret}/versions/{secret_version}".format( + project=project, + secret=secret, + secret_version=secret_version, + ) + actual = ExtensionExecutionServiceClient.secret_version_path( + project, secret, secret_version + ) + assert expected == actual + + +def test_parse_secret_version_path(): + expected = { + "project": "nautilus", + "secret": "scallop", + "secret_version": "abalone", + } + path = ExtensionExecutionServiceClient.secret_version_path(**expected) + + # Check that the path construction is reversible. + actual = ExtensionExecutionServiceClient.parse_secret_version_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = ExtensionExecutionServiceClient.common_billing_account_path( + billing_account + ) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ExtensionExecutionServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ExtensionExecutionServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = ExtensionExecutionServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ExtensionExecutionServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ExtensionExecutionServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = ExtensionExecutionServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ExtensionExecutionServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ExtensionExecutionServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format( + project=project, + ) + actual = ExtensionExecutionServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ExtensionExecutionServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ExtensionExecutionServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = ExtensionExecutionServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ExtensionExecutionServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ExtensionExecutionServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.ExtensionExecutionServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.ExtensionExecutionServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = ExtensionExecutionServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_get_location_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.GetLocationRequest +): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_location(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.GetLocationRequest, + dict, + ], +) +def test_get_location_rest(request_type): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.Location() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_location(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_list_locations_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.ListLocationsRequest +): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({"name": "projects/sample1"}, request) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_locations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.ListLocationsRequest, + dict, + ], +) +def test_list_locations_rest(request_type): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_locations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_get_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest(request_type): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_set_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest(request_type): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_test_iam_permissions_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest +): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest(request_type): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + +def test_cancel_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.CancelOperationRequest +): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.DeleteOperationRequest +): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.DeleteOperationRequest, + dict, + ], +) +def test_delete_operation_rest(request_type): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.ListOperationsRequest +): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_wait_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.WaitOperationRequest +): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.WaitOperationRequest, + dict, + ], +) +def test_wait_operation_rest(request_type): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.wait_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_delete_operation(transport: str = "grpc"): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_field_headers(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_delete_operation_from_dict(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_wait_operation(transport: str = "grpc"): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = ExtensionExecutionServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+
+def test_wait_operation_field_headers():
+    client = ExtensionExecutionServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_wait_operation_from_dict(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_list_locations_field_headers(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_locations_from_dict(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = ExtensionExecutionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_get_location_field_headers(): + client = ExtensionExecutionServiceClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
@pytest.mark.asyncio
async def test_get_location_field_headers_async():
    """Async variant: GetLocation routes the resource name as a field header."""
    client = ExtensionExecutionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials()
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = locations_pb2.GetLocationRequest()
    request.name = "locations/abc"

    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.Location()
        )
        await client.get_location(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

        # Establish that the field header was sent.
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "name=locations/abc") in kw["metadata"]


def test_get_location_from_dict():
    """GetLocation accepts a plain dict in place of a request proto.

    Bug fix: this test previously patched ``list_locations``, so the
    ``get_location`` call below was never intercepted and would have been
    dispatched over the real (unconnected) channel. Patch the method that
    is actually exercised.
    """
    client = ExtensionExecutionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the method under test (get_location, not list_locations).
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = locations_pb2.Location()
        client.get_location(
            request={
                "name": "locations/abc",
            }
        )
        call.assert_called()


@pytest.mark.asyncio
async def test_get_location_from_dict_async():
    """Async variant of test_get_location_from_dict (same mock-target fix)."""
    client = ExtensionExecutionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            locations_pb2.Location()
        )
        await client.get_location(
            request={
                "name": "locations",
            }
        )
        call.assert_called()


def test_set_iam_policy(transport: str = "grpc"):
    """SetIamPolicy forwards the request and returns the stub's Policy."""
    client = ExtensionExecutionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = iam_policy_pb2.SetIamPolicyRequest()

    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = policy_pb2.Policy(
            version=774,
            etag=b"etag_blob",
        )
        response = client.set_iam_policy(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the response is the type that we expect.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
    """Async variant of test_set_iam_policy."""
    client = ExtensionExecutionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # An empty request is fine: proto3 fields are optional and the API is mocked.
    request = iam_policy_pb2.SetIamPolicyRequest()

    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as rpc:
        # Canned awaitable reply for the stub.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(
                version=774,
                etag=b"etag_blob",
            )
        )
        response = await client.set_iam_policy(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The canned policy is surfaced unchanged.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"


def test_set_iam_policy_field_headers():
    """SetIamPolicy routes the resource as an x-goog-request-params header."""
    client = ExtensionExecutionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # URI-bound fields must travel as field headers; use a non-empty value.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"

    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as rpc:
        rpc.return_value = policy_pb2.Policy()
        client.set_iam_policy(request)
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

        # Routing header present in the outgoing metadata.
        _, _, kw = rpc.mock_calls[0]
        assert ("x-goog-request-params", "resource=resource/value") in kw["metadata"]


@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
    """Async variant of test_set_iam_policy_field_headers."""
    client = ExtensionExecutionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource/value"

    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.set_iam_policy(request)
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

        _, _, kw = rpc.mock_calls[0]
        assert ("x-goog-request-params", "resource=resource/value") in kw["metadata"]


def test_set_iam_policy_from_dict():
    """SetIamPolicy accepts a plain dict in place of a request proto."""
    client = ExtensionExecutionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as rpc:
        rpc.return_value = policy_pb2.Policy()
        client.set_iam_policy(
            request={
                "resource": "resource_value",
                "policy": policy_pb2.Policy(version=774),
            }
        )
        rpc.assert_called()
@pytest.mark.asyncio
async def test_set_iam_policy_from_dict_async():
    """Async variant of test_set_iam_policy_from_dict."""
    client = ExtensionExecutionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.set_iam_policy(
            request={
                "resource": "resource_value",
                "policy": policy_pb2.Policy(version=774),
            }
        )
        rpc.assert_called()


def test_get_iam_policy(transport: str = "grpc"):
    """GetIamPolicy forwards the request and returns the stub's Policy."""
    client = ExtensionExecutionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request: proto3 fields are optional and the API is mocked.
    request = iam_policy_pb2.GetIamPolicyRequest()

    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        rpc.return_value = policy_pb2.Policy(
            version=774,
            etag=b"etag_blob",
        )
        response = client.get_iam_policy(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The canned policy is surfaced unchanged.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"


@pytest.mark.asyncio
async def test_get_iam_policy_async(transport: str = "grpc_asyncio"):
    """Async variant of test_get_iam_policy."""
    client = ExtensionExecutionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    request = iam_policy_pb2.GetIamPolicyRequest()

    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(
                version=774,
                etag=b"etag_blob",
            )
        )
        response = await client.get_iam_policy(request)
        # NOTE: the generated test only checks the call list is non-empty here
        # (no exact count); preserved as-is.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"


def test_get_iam_policy_field_headers():
    """GetIamPolicy routes the resource as an x-goog-request-params header."""
    client = ExtensionExecutionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # URI-bound fields must travel as field headers; use a non-empty value.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"

    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        rpc.return_value = policy_pb2.Policy()
        client.get_iam_policy(request)
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

        # Routing header present in the outgoing metadata.
        _, _, kw = rpc.mock_calls[0]
        assert ("x-goog-request-params", "resource=resource/value") in kw["metadata"]
@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
    """Async variant of test_get_iam_policy_field_headers."""
    client = ExtensionExecutionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # URI-bound fields must travel as field headers; use a non-empty value.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource/value"

    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.get_iam_policy(request)
        # NOTE: only a non-empty call list is checked here, as generated.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

        _, _, kw = rpc.mock_calls[0]
        assert ("x-goog-request-params", "resource=resource/value") in kw["metadata"]


def test_get_iam_policy_from_dict():
    """GetIamPolicy accepts a plain dict in place of a request proto."""
    client = ExtensionExecutionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        rpc.return_value = policy_pb2.Policy()
        client.get_iam_policy(
            request={
                "resource": "resource_value",
                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
            }
        )
        rpc.assert_called()


@pytest.mark.asyncio
async def test_get_iam_policy_from_dict_async():
    """Async variant of test_get_iam_policy_from_dict."""
    client = ExtensionExecutionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.get_iam_policy(
            request={
                "resource": "resource_value",
                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
            }
        )
        rpc.assert_called()


def test_test_iam_permissions(transport: str = "grpc"):
    """TestIamPermissions forwards the request and surfaces the stub reply."""
    client = ExtensionExecutionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request: proto3 fields are optional and the API is mocked.
    request = iam_policy_pb2.TestIamPermissionsRequest()

    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as rpc:
        rpc.return_value = iam_policy_pb2.TestIamPermissionsResponse(
            permissions=["permissions_value"],
        )
        response = client.test_iam_permissions(request)
        # One stub invocation, carrying our request object.
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    # The canned response is surfaced unchanged.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"):
    """Async variant of test_test_iam_permissions."""
    client = ExtensionExecutionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request: proto3 fields are optional and the API is mocked.
    request = iam_policy_pb2.TestIamPermissionsRequest()

    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse(
                permissions=["permissions_value"],
            )
        )
        response = await client.test_iam_permissions(request)
        # NOTE: only a non-empty call list is checked here, as generated.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]


def test_test_iam_permissions_field_headers():
    """TestIamPermissions routes the resource as an x-goog-request-params header."""
    client = ExtensionExecutionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # URI-bound fields must travel as field headers; use a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource/value"

    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as rpc:
        rpc.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        client.test_iam_permissions(request)
        assert len(rpc.mock_calls) == 1
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

        # Routing header present in the outgoing metadata.
        _, _, kw = rpc.mock_calls[0]
        assert ("x-goog-request-params", "resource=resource/value") in kw["metadata"]


@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
    """Async variant of test_test_iam_permissions_field_headers."""
    client = ExtensionExecutionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource/value"

    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        await client.test_iam_permissions(request)
        # NOTE: only a non-empty call list is checked here, as generated.
        assert len(rpc.mock_calls)
        _, args, _ = rpc.mock_calls[0]
        assert args[0] == request

        _, _, kw = rpc.mock_calls[0]
        assert ("x-goog-request-params", "resource=resource/value") in kw["metadata"]


def test_test_iam_permissions_from_dict():
    """TestIamPermissions accepts a plain dict in place of a request proto."""
    client = ExtensionExecutionServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as rpc:
        rpc.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        client.test_iam_permissions(
            request={
                "resource": "resource_value",
                "permissions": ["permissions_value"],
            }
        )
        rpc.assert_called()
@pytest.mark.asyncio
async def test_test_iam_permissions_from_dict_async():
    """Async variant of test_test_iam_permissions_from_dict."""
    client = ExtensionExecutionServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        await client.test_iam_permissions(
            request={
                "resource": "resource_value",
                "permissions": ["permissions_value"],
            }
        )
        rpc.assert_called()


def test_transport_close():
    """Exiting the client context closes the transport's underlying handle."""
    # Map each transport name to the private attribute that owns the
    # network resource that must be closed.
    transports = {
        "rest": "_session",
        "grpc": "_grpc_channel",
    }

    for transport, close_name in transports.items():
        client = ExtensionExecutionServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close_mock:
            with client:
                close_mock.assert_not_called()
            close_mock.assert_called_once()


def test_client_ctx():
    """The client is a context manager that closes its transport on exit."""
    transports = [
        "rest",
        "grpc",
    ]
    for transport in transports:
        client = ExtensionExecutionServiceClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # The client must delegate close() to the underlying transport.
        with mock.patch.object(type(client.transport), "close") as close_mock:
            close_mock.assert_not_called()
            with client:
                pass
            close_mock.assert_called()
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + ( + ExtensionExecutionServiceClient, + transports.ExtensionExecutionServiceGrpcTransport, + ), + ( + ExtensionExecutionServiceAsyncClient, + transports.ExtensionExecutionServiceGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py new file mode 100644 index 0000000000..469dc611fa --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1beta1/test_extension_registry_service.py @@ -0,0 +1,7665 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from google.api_core import api_core_version +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.extension_registry_service import ( + ExtensionRegistryServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.extension_registry_service import ( + ExtensionRegistryServiceClient, +) +from 
from google.cloud.aiplatform_v1beta1.services.extension_registry_service import pagers
from google.cloud.aiplatform_v1beta1.services.extension_registry_service import (
    transports,
)
from google.cloud.aiplatform_v1beta1.types import extension
from google.cloud.aiplatform_v1beta1.types import extension as gca_extension
from google.cloud.aiplatform_v1beta1.types import extension_registry_service
from google.cloud.aiplatform_v1beta1.types import openapi
from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
from google.cloud.aiplatform_v1beta1.types import tool
from google.cloud.location import locations_pb2
from google.iam.v1 import iam_policy_pb2  # type: ignore
from google.iam.v1 import options_pb2  # type: ignore
from google.iam.v1 import policy_pb2  # type: ignore
from google.longrunning import operations_pb2  # type: ignore
from google.oauth2 import service_account
from google.protobuf import empty_pb2  # type: ignore
from google.protobuf import field_mask_pb2  # type: ignore
from google.protobuf import struct_pb2  # type: ignore
from google.protobuf import timestamp_pb2  # type: ignore
import google.auth


def client_cert_source_callback():
    """Return dummy client certificate/key bytes for mTLS tests."""
    return b"cert bytes", b"key bytes"


# If the default endpoint is localhost the default mTLS endpoint would be
# identical; substitute a distinct hostname so endpoint tests can tell the
# two apart.
def modify_default_endpoint(client):
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
# Same localhost workaround as modify_default_endpoint, but applied to the
# universe-domain endpoint template.
def modify_default_endpoint_template(client):
    if "localhost" in client._DEFAULT_ENDPOINT_TEMPLATE:
        return "test.{UNIVERSE_DOMAIN}"
    return client._DEFAULT_ENDPOINT_TEMPLATE


def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com to *.mtls.googleapis.com."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    convert = ExtensionRegistryServiceClient._get_default_mtls_endpoint
    # None passes through untouched.
    assert convert(None) is None
    # Plain and sandbox endpoints gain the "mtls." label; already-mTLS and
    # non-Google endpoints come back unchanged.
    assert convert(api_endpoint) == api_mtls_endpoint
    assert convert(api_mtls_endpoint) == api_mtls_endpoint
    assert convert(sandbox_endpoint) == sandbox_mtls_endpoint
    assert convert(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    assert convert(non_googleapi) == non_googleapi


def test__read_environment_variables():
    """_read_environment_variables reflects cert/mTLS/universe env settings."""
    read = ExtensionRegistryServiceClient._read_environment_variables

    # Defaults: no client cert, mTLS mode "auto", no universe override.
    assert read() == (False, "auto", None)

    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        assert read() == (True, "auto", None)

    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        assert read() == (False, "auto", None)

    # Any value other than "true"/"false" is rejected.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError) as excinfo:
            read()
        assert (
            str(excinfo.value)
            == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
        )

    # All three supported mTLS modes are surfaced verbatim.
    for mtls_mode in ("never", "always", "auto"):
        with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": mtls_mode}):
            assert read() == (False, mtls_mode, None)

    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError) as excinfo:
            read()
        assert (
            str(excinfo.value)
            == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
        )

    with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}):
        assert read() == (False, "auto", "foo.com")


def test__get_client_cert_source():
    """_get_client_cert_source honours use_cert and falls back to the default source."""
    mock_provided_cert_source = mock.Mock()
    mock_default_cert_source = mock.Mock()

    get_source = ExtensionRegistryServiceClient._get_client_cert_source
    # With use_cert falsy, no source is returned regardless of what was provided.
    assert get_source(None, False) is None
    assert get_source(mock_provided_cert_source, False) is None
    # With use_cert truthy, the provided source wins.
    assert get_source(mock_provided_cert_source, True) == mock_provided_cert_source

    with mock.patch(
        "google.auth.transport.mtls.has_default_client_cert_source", return_value=True
    ):
        with mock.patch(
            "google.auth.transport.mtls.default_client_cert_source",
            return_value=mock_default_cert_source,
        ):
            # No provided source: fall back to the ADC default source.
            assert (
                get_source(None, True)
                is mock_default_cert_source
            )
            # A provided source still takes precedence (truthy string flag).
            assert (
                get_source(mock_provided_cert_source, "true")
                is mock_provided_cert_source
            )
@mock.patch.object(
    ExtensionRegistryServiceClient,
    "_DEFAULT_ENDPOINT_TEMPLATE",
    modify_default_endpoint_template(ExtensionRegistryServiceClient),
)
@mock.patch.object(
    ExtensionRegistryServiceAsyncClient,
    "_DEFAULT_ENDPOINT_TEMPLATE",
    modify_default_endpoint_template(ExtensionRegistryServiceAsyncClient),
)
def test__get_api_endpoint():
    """_get_api_endpoint resolves override, mTLS mode and universe domain."""
    api_override = "foo.com"
    mock_client_cert_source = mock.Mock()
    default_universe = ExtensionRegistryServiceClient._DEFAULT_UNIVERSE
    default_endpoint = ExtensionRegistryServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
        UNIVERSE_DOMAIN=default_universe
    )
    mock_universe = "bar.com"
    mock_endpoint = ExtensionRegistryServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format(
        UNIVERSE_DOMAIN=mock_universe
    )

    resolve = ExtensionRegistryServiceClient._get_api_endpoint
    # An explicit API override always wins.
    assert (
        resolve(api_override, mock_client_cert_source, default_universe, "always")
        == api_override
    )
    # A client cert in "auto" mode selects the mTLS endpoint.
    assert (
        resolve(None, mock_client_cert_source, default_universe, "auto")
        == ExtensionRegistryServiceClient.DEFAULT_MTLS_ENDPOINT
    )
    # No cert in "auto" mode: plain default endpoint.
    assert resolve(None, None, default_universe, "auto") == default_endpoint
    # "always" forces mTLS with or without a cert source.
    assert (
        resolve(None, None, default_universe, "always")
        == ExtensionRegistryServiceClient.DEFAULT_MTLS_ENDPOINT
    )
    assert (
        resolve(None, mock_client_cert_source, default_universe, "always")
        == ExtensionRegistryServiceClient.DEFAULT_MTLS_ENDPOINT
    )
    # "never" keeps the plain endpoint for any universe.
    assert resolve(None, None, mock_universe, "never") == mock_endpoint
    assert resolve(None, None, default_universe, "never") == default_endpoint

    # mTLS is only supported in the default universe.
    with pytest.raises(MutualTLSChannelError) as excinfo:
        resolve(None, mock_client_cert_source, mock_universe, "auto")
    assert (
        str(excinfo.value)
        == "mTLS is not supported in any universe other than googleapis.com."
    )


def test__get_universe_domain():
    """_get_universe_domain prefers the client setting, then env, then default."""
    client_universe_domain = "foo.com"
    universe_domain_env = "bar.com"

    get_universe = ExtensionRegistryServiceClient._get_universe_domain
    assert (
        get_universe(client_universe_domain, universe_domain_env)
        == client_universe_domain
    )
    assert get_universe(None, universe_domain_env) == universe_domain_env
    assert get_universe(None, None) == ExtensionRegistryServiceClient._DEFAULT_UNIVERSE

    # An explicit empty string is rejected outright.
    with pytest.raises(ValueError) as excinfo:
        get_universe("", None)
    assert str(excinfo.value) == "Universe Domain cannot be an empty string."


@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            ExtensionRegistryServiceClient,
            transports.ExtensionRegistryServiceGrpcTransport,
            "grpc",
        ),
        (
            ExtensionRegistryServiceClient,
            transports.ExtensionRegistryServiceRestTransport,
            "rest",
        ),
    ],
)
def test__validate_universe_domain(client_class, transport_class, transport_name):
    """_validate_universe_domain accepts matching universes, rejects mismatches."""
    client = client_class(
        transport=transport_class(credentials=ga_credentials.AnonymousCredentials())
    )
    assert client._validate_universe_domain() == True

    # Validation result is cached; a second call still succeeds.
    assert client._validate_universe_domain() == True

    if transport_name == "grpc":
        # Credentials supplied via local_channel_credentials: the default
        # universes on both sides match.
        channel = grpc.secure_channel(
            "http://localhost/", grpc.local_channel_credentials()
        )
        client = client_class(transport=transport_class(channel=channel))
        assert client._validate_universe_domain() == True

        # No credentials at all: nothing to mismatch against, so validation
        # still succeeds.
        channel = grpc.secure_channel(
            "http://localhost/", grpc.local_channel_credentials()
        )
        transport = transport_class(channel=channel)
        transport._credentials = None
        client = client_class(transport=transport)
        assert client._validate_universe_domain() == True

    # TODO: This is needed to cater for older versions of google-auth
    # Make this test unconditional once the minimum supported version of
    # google-auth becomes 2.23.0 or higher.
    google_auth_major, google_auth_minor = [
        int(part) for part in google.auth.__version__.split(".")[0:2]
    ]
    if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23):
        # Universe mismatch coming from the credentials side.
        credentials = ga_credentials.AnonymousCredentials()
        credentials._universe_domain = "foo.com"
        client = client_class(transport=transport_class(credentials=credentials))
        with pytest.raises(ValueError) as excinfo:
            client._validate_universe_domain()
        assert (
            str(excinfo.value)
            == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default."
        )

    # Universe mismatch coming from the client options side.
    #
    # TODO: Make this test unconditional once the minimum supported version of
    # google-api-core becomes 2.15.0 or higher.
    api_core_major, api_core_minor = [
        int(part) for part in api_core_version.__version__.split(".")[0:2]
    ]
    if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15):
        client = client_class(
            client_options={"universe_domain": "bar.com"},
            transport=transport_class(
                credentials=ga_credentials.AnonymousCredentials(),
            ),
        )
        with pytest.raises(ValueError) as excinfo:
            client._validate_universe_domain()
        assert (
            str(excinfo.value)
            == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default."
        )

    # ValueError if a universe is supplied but credentials are None.
    with pytest.raises(ValueError):
        client._compare_universes("foo.bar", None)


@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (ExtensionRegistryServiceClient, "grpc"),
        (ExtensionRegistryServiceAsyncClient, "grpc_asyncio"),
        (ExtensionRegistryServiceClient, "rest"),
    ],
)
def test_extension_registry_service_client_from_service_account_info(
    client_class, transport_name
):
    """from_service_account_info builds a client around the parsed credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info, transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        # gRPC transports carry host:port; REST carries a full URL.
        assert client.transport._host == (
            "aiplatform.googleapis.com:443"
            if transport_name in ["grpc", "grpc_asyncio"]
            else "https://aiplatform.googleapis.com"
        )
(transports.ExtensionRegistryServiceRestTransport, "rest"), + ], +) +def test_extension_registry_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (ExtensionRegistryServiceClient, "grpc"), + (ExtensionRegistryServiceAsyncClient, "grpc_asyncio"), + (ExtensionRegistryServiceClient, "rest"), + ], +) +def test_extension_registry_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +def test_extension_registry_service_client_get_transport_class(): + transport = ExtensionRegistryServiceClient.get_transport_class() + available_transports = [ + 
transports.ExtensionRegistryServiceGrpcTransport, + transports.ExtensionRegistryServiceRestTransport, + ] + assert transport in available_transports + + transport = ExtensionRegistryServiceClient.get_transport_class("grpc") + assert transport == transports.ExtensionRegistryServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ExtensionRegistryServiceClient, + transports.ExtensionRegistryServiceGrpcTransport, + "grpc", + ), + ( + ExtensionRegistryServiceAsyncClient, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + ExtensionRegistryServiceClient, + transports.ExtensionRegistryServiceRestTransport, + "rest", + ), + ], +) +@mock.patch.object( + ExtensionRegistryServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionRegistryServiceClient), +) +@mock.patch.object( + ExtensionRegistryServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionRegistryServiceAsyncClient), +) +def test_extension_registry_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object( + ExtensionRegistryServiceClient, "get_transport_class" + ) as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object( + ExtensionRegistryServiceClient, "get_transport_class" + ) as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + 
always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + ExtensionRegistryServiceClient, + transports.ExtensionRegistryServiceGrpcTransport, + "grpc", + "true", + ), + ( + ExtensionRegistryServiceAsyncClient, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + ExtensionRegistryServiceClient, + transports.ExtensionRegistryServiceGrpcTransport, + "grpc", + "false", + ), + ( + ExtensionRegistryServiceAsyncClient, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + ExtensionRegistryServiceClient, + transports.ExtensionRegistryServiceRestTransport, + "rest", + "true", + ), + ( + ExtensionRegistryServiceClient, + transports.ExtensionRegistryServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + ExtensionRegistryServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionRegistryServiceClient), +) +@mock.patch.object( + ExtensionRegistryServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionRegistryServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_extension_registry_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", + [ExtensionRegistryServiceClient, ExtensionRegistryServiceAsyncClient], +) +@mock.patch.object( + ExtensionRegistryServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ExtensionRegistryServiceClient), +) +@mock.patch.object( + ExtensionRegistryServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ExtensionRegistryServiceAsyncClient), +) +def test_extension_registry_service_client_get_mtls_endpoint_and_cert_source( + client_class, +): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", + [ExtensionRegistryServiceClient, ExtensionRegistryServiceAsyncClient], +) +@mock.patch.object( + ExtensionRegistryServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionRegistryServiceClient), +) +@mock.patch.object( + ExtensionRegistryServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(ExtensionRegistryServiceAsyncClient), +) +def test_extension_registry_service_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = ExtensionRegistryServiceClient._DEFAULT_UNIVERSE + default_endpoint = ExtensionRegistryServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = ExtensionRegistryServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + ExtensionRegistryServiceClient, + transports.ExtensionRegistryServiceGrpcTransport, + "grpc", + ), + ( + ExtensionRegistryServiceAsyncClient, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + ExtensionRegistryServiceClient, + transports.ExtensionRegistryServiceRestTransport, + "rest", + ), + ], +) +def test_extension_registry_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + ExtensionRegistryServiceClient, + transports.ExtensionRegistryServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + ExtensionRegistryServiceAsyncClient, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + ExtensionRegistryServiceClient, + transports.ExtensionRegistryServiceRestTransport, + "rest", + None, + ), + ], +) +def test_extension_registry_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_extension_registry_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.extension_registry_service.transports.ExtensionRegistryServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = ExtensionRegistryServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + ExtensionRegistryServiceClient, + transports.ExtensionRegistryServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + ExtensionRegistryServiceAsyncClient, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_extension_registry_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + extension_registry_service.ImportExtensionRequest, + dict, + ], +) +def test_import_extension(request_type, transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is 
concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.import_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = extension_registry_service.ImportExtensionRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_extension_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_extension), "__call__") as call: + client.import_extension() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.ImportExtensionRequest() + + +def test_import_extension_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = extension_registry_service.ImportExtensionRequest( + parent="parent_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_extension), "__call__") as call: + client.import_extension(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.ImportExtensionRequest( + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_import_extension_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.import_extension() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.ImportExtensionRequest() + + +@pytest.mark.asyncio +async def test_import_extension_async( + transport: str = "grpc_asyncio", + request_type=extension_registry_service.ImportExtensionRequest, +): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_extension), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.import_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = extension_registry_service.ImportExtensionRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_extension_async_from_dict(): + await test_import_extension_async(request_type=dict) + + +def test_import_extension_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = extension_registry_service.ImportExtensionRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_extension), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.import_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_import_extension_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = extension_registry_service.ImportExtensionRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_extension), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.import_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_import_extension_flattened(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_extension( + parent="parent_value", + extension=gca_extension.Extension(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].extension + mock_val = gca_extension.Extension(name="name_value") + assert arg == mock_val + + +def test_import_extension_flattened_error(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.import_extension( + extension_registry_service.ImportExtensionRequest(), + parent="parent_value", + extension=gca_extension.Extension(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_import_extension_flattened_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_extension( + parent="parent_value", + extension=gca_extension.Extension(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].extension + mock_val = gca_extension.Extension(name="name_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_import_extension_flattened_error_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.import_extension( + extension_registry_service.ImportExtensionRequest(), + parent="parent_value", + extension=gca_extension.Extension(name="name_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + extension_registry_service.GetExtensionRequest, + dict, + ], +) +def test_get_extension(request_type, transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = extension.Extension( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + response = client.get_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = extension_registry_service.GetExtensionRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, extension.Extension) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.etag == "etag_value" + + +def test_get_extension_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_extension), "__call__") as call: + client.get_extension() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.GetExtensionRequest() + + +def test_get_extension_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = extension_registry_service.GetExtensionRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_extension), "__call__") as call: + client.get_extension(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.GetExtensionRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_extension_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_extension), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension.Extension( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + ) + response = await client.get_extension() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.GetExtensionRequest() + + +@pytest.mark.asyncio +async def test_get_extension_async( + transport: str = "grpc_asyncio", + request_type=extension_registry_service.GetExtensionRequest, +): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension.Extension( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + ) + response = await client.get_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = extension_registry_service.GetExtensionRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, extension.Extension) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_get_extension_async_from_dict(): + await test_get_extension_async(request_type=dict) + + +def test_get_extension_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = extension_registry_service.GetExtensionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_extension), "__call__") as call: + call.return_value = extension.Extension() + client.get_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_extension_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = extension_registry_service.GetExtensionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_extension), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(extension.Extension()) + await client.get_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_extension_flattened(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = extension.Extension() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_extension( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_extension_flattened_error(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_extension( + extension_registry_service.GetExtensionRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_extension_flattened_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = extension.Extension() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(extension.Extension()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_extension( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_extension_flattened_error_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_extension( + extension_registry_service.GetExtensionRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + extension_registry_service.ListExtensionsRequest, + dict, + ], +) +def test_list_extensions(request_type, transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_extensions), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = extension_registry_service.ListExtensionsResponse( + next_page_token="next_page_token_value", + ) + response = client.list_extensions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = extension_registry_service.ListExtensionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExtensionsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_extensions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_extensions), "__call__") as call: + client.list_extensions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.ListExtensionsRequest() + + +def test_list_extensions_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = extension_registry_service.ListExtensionsRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + order_by="order_by_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_extensions), "__call__") as call: + client.list_extensions(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.ListExtensionsRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + order_by="order_by_value", + ) + + +@pytest.mark.asyncio +async def test_list_extensions_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_extensions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension_registry_service.ListExtensionsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_extensions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.ListExtensionsRequest() + + +@pytest.mark.asyncio +async def test_list_extensions_async( + transport: str = "grpc_asyncio", + request_type=extension_registry_service.ListExtensionsRequest, +): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_extensions), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension_registry_service.ListExtensionsResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_extensions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = extension_registry_service.ListExtensionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListExtensionsAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_extensions_async_from_dict(): + await test_list_extensions_async(request_type=dict) + + +def test_list_extensions_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = extension_registry_service.ListExtensionsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_extensions), "__call__") as call: + call.return_value = extension_registry_service.ListExtensionsResponse() + client.list_extensions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_extensions_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = extension_registry_service.ListExtensionsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_extensions), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension_registry_service.ListExtensionsResponse() + ) + await client.list_extensions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_extensions_flattened(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_extensions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = extension_registry_service.ListExtensionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_extensions( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_extensions_flattened_error(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_extensions( + extension_registry_service.ListExtensionsRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_extensions_flattened_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_extensions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = extension_registry_service.ListExtensionsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + extension_registry_service.ListExtensionsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_extensions( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_extensions_flattened_error_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_extensions( + extension_registry_service.ListExtensionsRequest(), + parent="parent_value", + ) + + +def test_list_extensions_pager(transport_name: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_extensions), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + extension.Extension(), + extension.Extension(), + ], + next_page_token="abc", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[], + next_page_token="def", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + ], + next_page_token="ghi", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + extension.Extension(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_extensions(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, extension.Extension) for i in results) + + +def test_list_extensions_pages(transport_name: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_extensions), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + extension.Extension(), + extension.Extension(), + ], + next_page_token="abc", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[], + next_page_token="def", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + ], + next_page_token="ghi", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + extension.Extension(), + ], + ), + RuntimeError, + ) + pages = list(client.list_extensions(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_extensions_async_pager(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_extensions), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + extension.Extension(), + extension.Extension(), + ], + next_page_token="abc", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[], + next_page_token="def", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + ], + next_page_token="ghi", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + extension.Extension(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_extensions( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, extension.Extension) for i in responses) + + +@pytest.mark.asyncio +async def test_list_extensions_async_pages(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_extensions), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + extension.Extension(), + extension.Extension(), + ], + next_page_token="abc", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[], + next_page_token="def", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + ], + next_page_token="ghi", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + extension.Extension(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_extensions(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + extension_registry_service.UpdateExtensionRequest, + dict, + ], +) +def test_update_extension(request_type, transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_extension.Extension( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + response = client.update_extension(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = extension_registry_service.UpdateExtensionRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_extension.Extension) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.etag == "etag_value" + + +def test_update_extension_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_extension), "__call__") as call: + client.update_extension() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.UpdateExtensionRequest() + + +def test_update_extension_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = extension_registry_service.UpdateExtensionRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.update_extension), "__call__") as call: + client.update_extension(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.UpdateExtensionRequest() + + +@pytest.mark.asyncio +async def test_update_extension_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_extension.Extension( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + ) + response = await client.update_extension() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.UpdateExtensionRequest() + + +@pytest.mark.asyncio +async def test_update_extension_async( + transport: str = "grpc_asyncio", + request_type=extension_registry_service.UpdateExtensionRequest, +): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_extension), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_extension.Extension( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + ) + response = await client.update_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = extension_registry_service.UpdateExtensionRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_extension.Extension) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.etag == "etag_value" + + +@pytest.mark.asyncio +async def test_update_extension_async_from_dict(): + await test_update_extension_async(request_type=dict) + + +def test_update_extension_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = extension_registry_service.UpdateExtensionRequest() + + request.extension.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_extension), "__call__") as call: + call.return_value = gca_extension.Extension() + client.update_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "extension.name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_update_extension_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = extension_registry_service.UpdateExtensionRequest() + + request.extension.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_extension), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_extension.Extension() + ) + await client.update_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "extension.name=name_value", + ) in kw["metadata"] + + +def test_update_extension_flattened(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_extension.Extension() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_extension( + extension=gca_extension.Extension(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].extension + mock_val = gca_extension.Extension(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +def test_update_extension_flattened_error(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_extension( + extension_registry_service.UpdateExtensionRequest(), + extension=gca_extension.Extension(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.asyncio +async def test_update_extension_flattened_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = gca_extension.Extension() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gca_extension.Extension() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_extension( + extension=gca_extension.Extension(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].extension + mock_val = gca_extension.Extension(name="name_value") + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=["paths_value"]) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_update_extension_flattened_error_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_extension( + extension_registry_service.UpdateExtensionRequest(), + extension=gca_extension.Extension(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + extension_registry_service.DeleteExtensionRequest, + dict, + ], +) +def test_delete_extension(request_type, transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = extension_registry_service.DeleteExtensionRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +def test_delete_extension_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_extension), "__call__") as call: + client.delete_extension() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.DeleteExtensionRequest() + + +def test_delete_extension_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = extension_registry_service.DeleteExtensionRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_extension), "__call__") as call: + client.delete_extension(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.DeleteExtensionRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_extension_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_extension() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == extension_registry_service.DeleteExtensionRequest() + + +@pytest.mark.asyncio +async def test_delete_extension_async( + transport: str = "grpc_asyncio", + request_type=extension_registry_service.DeleteExtensionRequest, +): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = extension_registry_service.DeleteExtensionRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_extension_async_from_dict(): + await test_delete_extension_async(request_type=dict) + + +def test_delete_extension_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = extension_registry_service.DeleteExtensionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_extension), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_extension(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_extension_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = extension_registry_service.DeleteExtensionRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_extension), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_extension(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_extension_flattened(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_extension), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_extension( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_extension_flattened_error(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_extension( + extension_registry_service.DeleteExtensionRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_extension_flattened_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_extension), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_extension( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_extension_flattened_error_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_extension( + extension_registry_service.DeleteExtensionRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + extension_registry_service.ImportExtensionRequest, + dict, + ], +) +def test_import_extension_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["extension"] = { + "name": "name_value", + "display_name": "display_name_value", + "description": "description_value", + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "etag": "etag_value", + "manifest": { + "name": "name_value", + "description": "description_value", + "api_spec": { + "open_api_yaml": "open_api_yaml_value", + "open_api_gcs_uri": "open_api_gcs_uri_value", + }, + "auth_config": { + "api_key_config": { + "name": "name_value", + "api_key_secret": "api_key_secret_value", + "http_element_location": 1, + 
}, + "http_basic_auth_config": { + "credential_secret": "credential_secret_value" + }, + "google_service_account_config": { + "service_account": "service_account_value" + }, + "oauth_config": { + "access_token": "access_token_value", + "service_account": "service_account_value", + }, + "oidc_config": { + "id_token": "id_token_value", + "service_account": "service_account_value", + }, + "auth_type": 1, + }, + }, + "extension_operations": [ + { + "operation_id": "operation_id_value", + "function_declaration": { + "name": "name_value", + "description": "description_value", + "parameters": { + "type_": 1, + "format_": "format__value", + "title": "title_value", + "description": "description_value", + "nullable": True, + "default": { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "struct_value": {"fields": {}}, + "list_value": {"values": {}}, + }, + "items": {}, + "min_items": 965, + "max_items": 967, + "enum": ["enum_value1", "enum_value2"], + "properties": {}, + "required": ["required_value1", "required_value2"], + "min_properties": 1520, + "max_properties": 1522, + "minimum": 0.764, + "maximum": 0.766, + "min_length": 1061, + "max_length": 1063, + "pattern": "pattern_value", + "example": {}, + }, + "response": {}, + }, + } + ], + "runtime_config": { + "code_interpreter_runtime_config": { + "file_input_gcs_bucket": "file_input_gcs_bucket_value", + "file_output_gcs_bucket": "file_output_gcs_bucket_value", + }, + "vertex_ai_search_runtime_config": { + "serving_config_name": "serving_config_name_value" + }, + "default_params": {}, + }, + "tool_use_examples": [ + { + "extension_operation": { + "extension": "extension_value", + "operation_id": "operation_id_value", + }, + "function_name": "function_name_value", + "display_name": "display_name_value", + "query": "query_value", + "request_params": {}, + "response_params": {}, + "response_summary": "response_summary_value", + } + ], + 
"private_service_connect_config": { + "service_directory": "service_directory_value" + }, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = extension_registry_service.ImportExtensionRequest.meta.fields[ + "extension" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["extension"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + 
subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["extension"][field])): + del request_init["extension"][field][i][subfield] + else: + del request_init["extension"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.import_extension(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_import_extension_rest_required_fields( + request_type=extension_registry_service.ImportExtensionRequest, +): + transport_class = transports.ExtensionRegistryServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).import_extension._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).import_extension._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.import_extension(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_import_extension_rest_unset_required_fields(): + transport = transports.ExtensionRegistryServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.import_extension._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "extension", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_import_extension_rest_interceptors(null_interceptor): + transport = transports.ExtensionRegistryServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ExtensionRegistryServiceRestInterceptor(), + ) + client = ExtensionRegistryServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.ExtensionRegistryServiceRestInterceptor, "post_import_extension" + ) as post, mock.patch.object( + transports.ExtensionRegistryServiceRestInterceptor, "pre_import_extension" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = extension_registry_service.ImportExtensionRequest.pb( + 
extension_registry_service.ImportExtensionRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = extension_registry_service.ImportExtensionRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.import_extension( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_import_extension_rest_bad_request( + transport: str = "rest", + request_type=extension_registry_service.ImportExtensionRequest, +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.import_extension(request) + + +def test_import_extension_rest_flattened(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + extension=gca_extension.Extension(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.import_extension(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*}/extensions:import" + % client.transport._host, + args[1], + ) + + +def test_import_extension_rest_flattened_error(transport: str = "rest"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.import_extension( + extension_registry_service.ImportExtensionRequest(), + parent="parent_value", + extension=gca_extension.Extension(name="name_value"), + ) + + +def test_import_extension_rest_error(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + extension_registry_service.GetExtensionRequest, + dict, + ], +) +def test_get_extension_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/extensions/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = extension.Extension( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = extension.Extension.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_extension(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, extension.Extension) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.etag == "etag_value" + + +def test_get_extension_rest_required_fields( + request_type=extension_registry_service.GetExtensionRequest, +): + transport_class = transports.ExtensionRegistryServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_extension._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_extension._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = extension.Extension() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = extension.Extension.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_extension(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_extension_rest_unset_required_fields(): + transport = transports.ExtensionRegistryServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_extension._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_extension_rest_interceptors(null_interceptor): + transport = transports.ExtensionRegistryServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ExtensionRegistryServiceRestInterceptor(), + ) + client = ExtensionRegistryServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ExtensionRegistryServiceRestInterceptor, "post_get_extension" + ) as post, mock.patch.object( + transports.ExtensionRegistryServiceRestInterceptor, "pre_get_extension" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + 
pb_message = extension_registry_service.GetExtensionRequest.pb( + extension_registry_service.GetExtensionRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = extension.Extension.to_json(extension.Extension()) + + request = extension_registry_service.GetExtensionRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = extension.Extension() + + client.get_extension( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_extension_rest_bad_request( + transport: str = "rest", request_type=extension_registry_service.GetExtensionRequest +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/extensions/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_extension(request) + + +def test_get_extension_rest_flattened(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = extension.Extension() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/extensions/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = extension.Extension.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_extension(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/extensions/*}" + % client.transport._host, + args[1], + ) + + +def test_get_extension_rest_flattened_error(transport: str = "rest"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_extension( + extension_registry_service.GetExtensionRequest(), + name="name_value", + ) + + +def test_get_extension_rest_error(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + extension_registry_service.ListExtensionsRequest, + dict, + ], +) +def test_list_extensions_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = extension_registry_service.ListExtensionsResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = extension_registry_service.ListExtensionsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_extensions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListExtensionsPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_extensions_rest_required_fields( + request_type=extension_registry_service.ListExtensionsRequest, +): + transport_class = transports.ExtensionRegistryServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_extensions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_extensions._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = extension_registry_service.ListExtensionsResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = extension_registry_service.ListExtensionsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_extensions(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_extensions_rest_unset_required_fields(): + transport = transports.ExtensionRegistryServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_extensions._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_extensions_rest_interceptors(null_interceptor): + transport = transports.ExtensionRegistryServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ExtensionRegistryServiceRestInterceptor(), + ) + client = ExtensionRegistryServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) 
as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ExtensionRegistryServiceRestInterceptor, "post_list_extensions" + ) as post, mock.patch.object( + transports.ExtensionRegistryServiceRestInterceptor, "pre_list_extensions" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = extension_registry_service.ListExtensionsRequest.pb( + extension_registry_service.ListExtensionsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + extension_registry_service.ListExtensionsResponse.to_json( + extension_registry_service.ListExtensionsResponse() + ) + ) + + request = extension_registry_service.ListExtensionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = extension_registry_service.ListExtensionsResponse() + + client.list_extensions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_extensions_rest_bad_request( + transport: str = "rest", + request_type=extension_registry_service.ListExtensionsRequest, +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_extensions(request) + + +def test_list_extensions_rest_flattened(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = extension_registry_service.ListExtensionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = extension_registry_service.ListExtensionsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_extensions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*}/extensions" + % client.transport._host, + args[1], + ) + + +def test_list_extensions_rest_flattened_error(transport: str = "rest"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_extensions( + extension_registry_service.ListExtensionsRequest(), + parent="parent_value", + ) + + +def test_list_extensions_rest_pager(transport: str = "rest"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + extension.Extension(), + extension.Extension(), + ], + next_page_token="abc", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[], + next_page_token="def", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + ], + next_page_token="ghi", + ), + extension_registry_service.ListExtensionsResponse( + extensions=[ + extension.Extension(), + extension.Extension(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + extension_registry_service.ListExtensionsResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_extensions(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, extension.Extension) for i in results) + + pages = list(client.list_extensions(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + extension_registry_service.UpdateExtensionRequest, + dict, + ], +) +def test_update_extension_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "extension": {"name": "projects/sample1/locations/sample2/extensions/sample3"} + } + 
request_init["extension"] = { + "name": "projects/sample1/locations/sample2/extensions/sample3", + "display_name": "display_name_value", + "description": "description_value", + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "etag": "etag_value", + "manifest": { + "name": "name_value", + "description": "description_value", + "api_spec": { + "open_api_yaml": "open_api_yaml_value", + "open_api_gcs_uri": "open_api_gcs_uri_value", + }, + "auth_config": { + "api_key_config": { + "name": "name_value", + "api_key_secret": "api_key_secret_value", + "http_element_location": 1, + }, + "http_basic_auth_config": { + "credential_secret": "credential_secret_value" + }, + "google_service_account_config": { + "service_account": "service_account_value" + }, + "oauth_config": { + "access_token": "access_token_value", + "service_account": "service_account_value", + }, + "oidc_config": { + "id_token": "id_token_value", + "service_account": "service_account_value", + }, + "auth_type": 1, + }, + }, + "extension_operations": [ + { + "operation_id": "operation_id_value", + "function_declaration": { + "name": "name_value", + "description": "description_value", + "parameters": { + "type_": 1, + "format_": "format__value", + "title": "title_value", + "description": "description_value", + "nullable": True, + "default": { + "null_value": 0, + "number_value": 0.1285, + "string_value": "string_value_value", + "bool_value": True, + "struct_value": {"fields": {}}, + "list_value": {"values": {}}, + }, + "items": {}, + "min_items": 965, + "max_items": 967, + "enum": ["enum_value1", "enum_value2"], + "properties": {}, + "required": ["required_value1", "required_value2"], + "min_properties": 1520, + "max_properties": 1522, + "minimum": 0.764, + "maximum": 0.766, + "min_length": 1061, + "max_length": 1063, + "pattern": "pattern_value", + "example": {}, + }, + "response": {}, + }, + } + ], + "runtime_config": { + "code_interpreter_runtime_config": { + "file_input_gcs_bucket": 
"file_input_gcs_bucket_value", + "file_output_gcs_bucket": "file_output_gcs_bucket_value", + }, + "vertex_ai_search_runtime_config": { + "serving_config_name": "serving_config_name_value" + }, + "default_params": {}, + }, + "tool_use_examples": [ + { + "extension_operation": { + "extension": "extension_value", + "operation_id": "operation_id_value", + }, + "function_name": "function_name_value", + "display_name": "display_name_value", + "query": "query_value", + "request_params": {}, + "response_params": {}, + "response_summary": "response_summary_value", + } + ], + "private_service_connect_config": { + "service_directory": "service_directory_value" + }, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = extension_registry_service.UpdateExtensionRequest.meta.fields[ + "extension" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["extension"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["extension"][field])): + del request_init["extension"][field][i][subfield] + else: + del 
request_init["extension"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gca_extension.Extension( + name="name_value", + display_name="display_name_value", + description="description_value", + etag="etag_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_extension.Extension.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.update_extension(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, gca_extension.Extension) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.etag == "etag_value" + + +def test_update_extension_rest_required_fields( + request_type=extension_registry_service.UpdateExtensionRequest, +): + transport_class = transports.ExtensionRegistryServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).update_extension._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + 
).update_extension._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gca_extension.Extension() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "patch", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = gca_extension.Extension.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.update_extension(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_update_extension_rest_unset_required_fields(): + transport = transports.ExtensionRegistryServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.update_extension._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(("updateMask",)) + & set( + ( + "extension", + "updateMask", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_extension_rest_interceptors(null_interceptor): + transport = transports.ExtensionRegistryServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ExtensionRegistryServiceRestInterceptor(), + ) + client = ExtensionRegistryServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.ExtensionRegistryServiceRestInterceptor, "post_update_extension" + ) as post, mock.patch.object( + transports.ExtensionRegistryServiceRestInterceptor, "pre_update_extension" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = extension_registry_service.UpdateExtensionRequest.pb( + 
extension_registry_service.UpdateExtensionRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = gca_extension.Extension.to_json( + gca_extension.Extension() + ) + + request = extension_registry_service.UpdateExtensionRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = gca_extension.Extension() + + client.update_extension( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_extension_rest_bad_request( + transport: str = "rest", + request_type=extension_registry_service.UpdateExtensionRequest, +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "extension": {"name": "projects/sample1/locations/sample2/extensions/sample3"} + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_extension(request) + + +def test_update_extension_rest_flattened(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = gca_extension.Extension() + + # get arguments that satisfy an http rule for this method + sample_request = { + "extension": { + "name": "projects/sample1/locations/sample2/extensions/sample3" + } + } + + # get truthy value for each flattened field + mock_args = dict( + extension=gca_extension.Extension(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gca_extension.Extension.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.update_extension(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{extension.name=projects/*/locations/*/extensions/*}" + % client.transport._host, + args[1], + ) + + +def test_update_extension_rest_flattened_error(transport: str = "rest"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.update_extension( + extension_registry_service.UpdateExtensionRequest(), + extension=gca_extension.Extension(name="name_value"), + update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]), + ) + + +def test_update_extension_rest_error(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + extension_registry_service.DeleteExtensionRequest, + dict, + ], +) +def test_delete_extension_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/extensions/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_extension(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_delete_extension_rest_required_fields( + request_type=extension_registry_service.DeleteExtensionRequest, +): + transport_class = transports.ExtensionRegistryServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_extension._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_extension._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_extension(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_extension_rest_unset_required_fields(): + transport = transports.ExtensionRegistryServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_extension._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_extension_rest_interceptors(null_interceptor): + transport = transports.ExtensionRegistryServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.ExtensionRegistryServiceRestInterceptor(), + ) + client = ExtensionRegistryServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.ExtensionRegistryServiceRestInterceptor, "post_delete_extension" + ) as post, mock.patch.object( + transports.ExtensionRegistryServiceRestInterceptor, "pre_delete_extension" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = extension_registry_service.DeleteExtensionRequest.pb( + extension_registry_service.DeleteExtensionRequest() + ) + transcode.return_value = { + "method": "post", + "uri": 
"my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = extension_registry_service.DeleteExtensionRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_extension( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_extension_rest_bad_request( + transport: str = "rest", + request_type=extension_registry_service.DeleteExtensionRequest, +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/extensions/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_extension(request) + + +def test_delete_extension_rest_flattened(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/extensions/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_extension(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/extensions/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_extension_rest_flattened_error(transport: str = "rest"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_extension( + extension_registry_service.DeleteExtensionRequest(), + name="name_value", + ) + + +def test_delete_extension_rest_error(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.ExtensionRegistryServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ExtensionRegistryServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ExtensionRegistryServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.ExtensionRegistryServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ExtensionRegistryServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = ExtensionRegistryServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ExtensionRegistryServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ExtensionRegistryServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.ExtensionRegistryServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ExtensionRegistryServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ExtensionRegistryServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ExtensionRegistryServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ExtensionRegistryServiceGrpcTransport, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + transports.ExtensionRegistryServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = ExtensionRegistryServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ExtensionRegistryServiceGrpcTransport, + ) + + +def test_extension_registry_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ExtensionRegistryServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_extension_registry_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.extension_registry_service.transports.ExtensionRegistryServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.ExtensionRegistryServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "import_extension", + "get_extension", + "list_extensions", + "update_extension", + "delete_extension", + "set_iam_policy", + "get_iam_policy", + "test_iam_permissions", + "get_location", + "list_locations", + "get_operation", + "wait_operation", + "cancel_operation", + "delete_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_extension_registry_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.extension_registry_service.transports.ExtensionRegistryServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ExtensionRegistryServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_extension_registry_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.extension_registry_service.transports.ExtensionRegistryServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ExtensionRegistryServiceTransport() + adc.assert_called_once() + + +def test_extension_registry_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ExtensionRegistryServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ExtensionRegistryServiceGrpcTransport, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + ], +) +def test_extension_registry_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ExtensionRegistryServiceGrpcTransport, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + transports.ExtensionRegistryServiceRestTransport, + ], +) +def test_extension_registry_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ExtensionRegistryServiceGrpcTransport, grpc_helpers), + (transports.ExtensionRegistryServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_extension_registry_service_transport_create_channel( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ExtensionRegistryServiceGrpcTransport, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + ], +) +def test_extension_registry_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_extension_registry_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.ExtensionRegistryServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_extension_registry_service_rest_lro_client(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_extension_registry_service_host_no_port(transport_name): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_extension_registry_service_host_with_port(transport_name): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_extension_registry_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = ExtensionRegistryServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = ExtensionRegistryServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.import_extension._session + session2 = client2.transport.import_extension._session + assert session1 != session2 + session1 = client1.transport.get_extension._session + session2 = client2.transport.get_extension._session + assert session1 != session2 + session1 = client1.transport.list_extensions._session + session2 
= client2.transport.list_extensions._session + assert session1 != session2 + session1 = client1.transport.update_extension._session + session2 = client2.transport.update_extension._session + assert session1 != session2 + session1 = client1.transport.delete_extension._session + session2 = client2.transport.delete_extension._session + assert session1 != session2 + + +def test_extension_registry_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ExtensionRegistryServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_extension_registry_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ExtensionRegistryServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.ExtensionRegistryServiceGrpcTransport, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + ], +) +def test_extension_registry_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.ExtensionRegistryServiceGrpcTransport, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + ], +) +def test_extension_registry_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_extension_registry_service_grpc_lro_client(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_extension_registry_service_grpc_lro_async_client(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_extension_path(): + project = "squid" + location = "clam" + extension = "whelk" + expected = "projects/{project}/locations/{location}/extensions/{extension}".format( + project=project, + location=location, + extension=extension, + ) + actual = ExtensionRegistryServiceClient.extension_path(project, location, extension) + assert expected == actual + + +def test_parse_extension_path(): + expected = { + "project": "octopus", + "location": "oyster", + "extension": "nudibranch", + } + path = ExtensionRegistryServiceClient.extension_path(**expected) + + # Check that the path construction is reversible. + actual = ExtensionRegistryServiceClient.parse_extension_path(path) + assert expected == actual + + +def test_secret_version_path(): + project = "cuttlefish" + secret = "mussel" + secret_version = "winkle" + expected = "projects/{project}/secrets/{secret}/versions/{secret_version}".format( + project=project, + secret=secret, + secret_version=secret_version, + ) + actual = ExtensionRegistryServiceClient.secret_version_path( + project, secret, secret_version + ) + assert expected == actual + + +def test_parse_secret_version_path(): + expected = { + "project": "nautilus", + "secret": "scallop", + "secret_version": "abalone", + } + path = ExtensionRegistryServiceClient.secret_version_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ExtensionRegistryServiceClient.parse_secret_version_path(path) + assert expected == actual + + +def test_service_path(): + project = "squid" + location = "clam" + namespace = "whelk" + service = "octopus" + expected = "projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}".format( + project=project, + location=location, + namespace=namespace, + service=service, + ) + actual = ExtensionRegistryServiceClient.service_path( + project, location, namespace, service + ) + assert expected == actual + + +def test_parse_service_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "namespace": "cuttlefish", + "service": "mussel", + } + path = ExtensionRegistryServiceClient.service_path(**expected) + + # Check that the path construction is reversible. + actual = ExtensionRegistryServiceClient.parse_service_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "winkle" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = ExtensionRegistryServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nautilus", + } + path = ExtensionRegistryServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ExtensionRegistryServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "scallop" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = ExtensionRegistryServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "abalone", + } + path = ExtensionRegistryServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ExtensionRegistryServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "squid" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = ExtensionRegistryServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "clam", + } + path = ExtensionRegistryServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ExtensionRegistryServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "whelk" + expected = "projects/{project}".format( + project=project, + ) + actual = ExtensionRegistryServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "octopus", + } + path = ExtensionRegistryServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ExtensionRegistryServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = ExtensionRegistryServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = ExtensionRegistryServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ExtensionRegistryServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.ExtensionRegistryServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.ExtensionRegistryServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = ExtensionRegistryServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_get_location_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.GetLocationRequest +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_location(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.GetLocationRequest, + dict, + ], +) +def test_get_location_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.Location() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_location(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_list_locations_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.ListLocationsRequest +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({"name": "projects/sample1"}, request) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_locations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.ListLocationsRequest, + dict, + ], +) +def test_list_locations_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_locations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_get_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_set_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_test_iam_permissions_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + +def test_cancel_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.CancelOperationRequest +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.DeleteOperationRequest +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.DeleteOperationRequest, + dict, + ], +) +def test_delete_operation_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.ListOperationsRequest +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_wait_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.WaitOperationRequest +): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.WaitOperationRequest, + dict, + ], +) +def test_wait_operation_rest(request_type): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.wait_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_delete_operation(transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_delete_operation_from_dict(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_wait_operation(transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_wait_operation(transport: str = "grpc_asyncio"): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_wait_operation_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_wait_operation_from_dict(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_list_locations_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_locations_from_dict(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_get_location_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations/abc", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_location_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations/abc", + ) in kw["metadata"] + + +def test_get_location_from_dict(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.Location() + + response = client.get_location( + request={ + "name": "locations/abc", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_location_from_dict_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_set_iam_policy(transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + response = client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_set_iam_policy_async(transport: str = "grpc_asyncio"): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.SetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_set_iam_policy_from_dict(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +def test_get_iam_policy(transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_test_iam_permissions(transport: str = "grpc"): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = ExtensionRegistryServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = ExtensionRegistryServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + ( + ExtensionRegistryServiceClient, + transports.ExtensionRegistryServiceGrpcTransport, + ), + ( + ExtensionRegistryServiceAsyncClient, + transports.ExtensionRegistryServiceGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py index 611fee1d34..98ae39741f 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_feature_online_store_admin_service.py @@ -7771,6 +7771,15 @@ def test_create_feature_view_rest(request_type): "embedding_dimension": 1988, "distance_measure_type": 1, }, + "index_config": { + "tree_ah_config": {"leaf_node_embedding_count": 2595}, + "brute_force_config": {}, + "embedding_column": "embedding_column_value", + "filter_columns": ["filter_columns_value1", "filter_columns_value2"], + 
"crowding_column": "crowding_column_value", + "embedding_dimension": 1988, + "distance_measure_type": 1, + }, "service_agent_type": 1, "service_account_email": "service_account_email_value", } @@ -8844,6 +8853,15 @@ def test_update_feature_view_rest(request_type): "embedding_dimension": 1988, "distance_measure_type": 1, }, + "index_config": { + "tree_ah_config": {"leaf_node_embedding_count": 2595}, + "brute_force_config": {}, + "embedding_column": "embedding_column_value", + "filter_columns": ["filter_columns_value1", "filter_columns_value2"], + "crowding_column": "crowding_column_value", + "embedding_dimension": 1988, + "distance_measure_type": 1, + }, "service_agent_type": 1, "service_account_email": "service_account_email_value", } diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py new file mode 100644 index 0000000000..7a8c62184e --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1beta1/test_notebook_service.py @@ -0,0 +1,10798 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from google.api_core import api_core_version +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.notebook_service import ( + NotebookServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.notebook_service import ( + NotebookServiceClient, +) +from google.cloud.aiplatform_v1beta1.services.notebook_service import pagers +from google.cloud.aiplatform_v1beta1.services.notebook_service import transports +from google.cloud.aiplatform_v1beta1.types import accelerator_type +from google.cloud.aiplatform_v1beta1.types import machine_resources +from google.cloud.aiplatform_v1beta1.types import network_spec +from google.cloud.aiplatform_v1beta1.types import notebook_euc_config +from google.cloud.aiplatform_v1beta1.types import notebook_idle_shutdown_config +from 
google.cloud.aiplatform_v1beta1.types import notebook_runtime +from google.cloud.aiplatform_v1beta1.types import ( + notebook_runtime as gca_notebook_runtime, +) +from google.cloud.aiplatform_v1beta1.types import notebook_runtime_template_ref +from google.cloud.aiplatform_v1beta1.types import notebook_service +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert NotebookServiceClient._get_default_mtls_endpoint(None) is None + assert ( + NotebookServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + NotebookServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + NotebookServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + NotebookServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + NotebookServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + ) + + +def test__read_environment_variables(): + assert NotebookServiceClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert NotebookServiceClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert NotebookServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + NotebookServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + 
assert NotebookServiceClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert NotebookServiceClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert NotebookServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + NotebookServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert NotebookServiceClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert NotebookServiceClient._get_client_cert_source(None, False) is None + assert ( + NotebookServiceClient._get_client_cert_source(mock_provided_cert_source, False) + is None + ) + assert ( + NotebookServiceClient._get_client_cert_source(mock_provided_cert_source, True) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + NotebookServiceClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + NotebookServiceClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + NotebookServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + 
modify_default_endpoint_template(NotebookServiceClient), +) +@mock.patch.object( + NotebookServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = NotebookServiceClient._DEFAULT_UNIVERSE + default_endpoint = NotebookServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = NotebookServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + NotebookServiceClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + NotebookServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == NotebookServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + NotebookServiceClient._get_api_endpoint(None, None, default_universe, "auto") + == default_endpoint + ) + assert ( + NotebookServiceClient._get_api_endpoint(None, None, default_universe, "always") + == NotebookServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + NotebookServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == NotebookServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + NotebookServiceClient._get_api_endpoint(None, None, mock_universe, "never") + == mock_endpoint + ) + assert ( + NotebookServiceClient._get_api_endpoint(None, None, default_universe, "never") + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + NotebookServiceClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." 
+ ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + NotebookServiceClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + NotebookServiceClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + NotebookServiceClient._get_universe_domain(None, None) + == NotebookServiceClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + NotebookServiceClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (NotebookServiceClient, transports.NotebookServiceGrpcTransport, "grpc"), + (NotebookServiceClient, transports.NotebookServiceRestTransport, "rest"), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. 
+ channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. + google_auth_major, google_auth_minor = [ + int(part) for part in google.auth.__version__.split(".")[0:2] + ] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. + client = client_class(transport=transport_class(credentials=credentials)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. 
+ api_core_major, api_core_minor = [ + int(part) for part in api_core_version.__version__.split(".")[0:2] + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (NotebookServiceClient, "grpc"), + (NotebookServiceAsyncClient, "grpc_asyncio"), + (NotebookServiceClient, "rest"), + ], +) +def test_notebook_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.NotebookServiceGrpcTransport, "grpc"), + (transports.NotebookServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.NotebookServiceRestTransport, "rest"), + ], +) +def 
test_notebook_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (NotebookServiceClient, "grpc"), + (NotebookServiceAsyncClient, "grpc_asyncio"), + (NotebookServiceClient, "rest"), + ], +) +def test_notebook_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +def test_notebook_service_client_get_transport_class(): + transport = NotebookServiceClient.get_transport_class() + available_transports = [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceRestTransport, + ] + assert transport in available_transports + + transport = 
NotebookServiceClient.get_transport_class("grpc") + assert transport == transports.NotebookServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (NotebookServiceClient, transports.NotebookServiceGrpcTransport, "grpc"), + ( + NotebookServiceAsyncClient, + transports.NotebookServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (NotebookServiceClient, transports.NotebookServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + NotebookServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceClient), +) +@mock.patch.object( + NotebookServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceAsyncClient), +) +def test_notebook_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(NotebookServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(NotebookServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + 
always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + NotebookServiceClient, + transports.NotebookServiceGrpcTransport, + "grpc", + "true", + ), + ( + NotebookServiceAsyncClient, + transports.NotebookServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + NotebookServiceClient, + transports.NotebookServiceGrpcTransport, + "grpc", + "false", + ), + ( + NotebookServiceAsyncClient, + transports.NotebookServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + NotebookServiceClient, + transports.NotebookServiceRestTransport, + "rest", + "true", + ), + ( + NotebookServiceClient, + transports.NotebookServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + NotebookServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceClient), +) +@mock.patch.object( + NotebookServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_notebook_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [NotebookServiceClient, NotebookServiceAsyncClient] +) +@mock.patch.object( + NotebookServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NotebookServiceClient), +) +@mock.patch.object( + NotebookServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NotebookServiceAsyncClient), +) +def test_notebook_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", [NotebookServiceClient, NotebookServiceAsyncClient] +) +@mock.patch.object( + NotebookServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceClient), +) +@mock.patch.object( + NotebookServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(NotebookServiceAsyncClient), +) +def test_notebook_service_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = NotebookServiceClient._DEFAULT_UNIVERSE + default_endpoint = NotebookServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = NotebookServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. + options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (NotebookServiceClient, transports.NotebookServiceGrpcTransport, "grpc"), + ( + NotebookServiceAsyncClient, + transports.NotebookServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (NotebookServiceClient, transports.NotebookServiceRestTransport, "rest"), + ], +) +def test_notebook_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + NotebookServiceClient, + transports.NotebookServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + NotebookServiceAsyncClient, + transports.NotebookServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + (NotebookServiceClient, transports.NotebookServiceRestTransport, "rest", None), + ], +) +def test_notebook_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, 
grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_notebook_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.notebook_service.transports.NotebookServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = NotebookServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + NotebookServiceClient, + transports.NotebookServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + NotebookServiceAsyncClient, + transports.NotebookServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_notebook_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.CreateNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_create_notebook_runtime_template(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime 
is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.CreateNotebookRuntimeTemplateRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_notebook_runtime_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + client.create_notebook_runtime_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.CreateNotebookRuntimeTemplateRequest() + + +def test_create_notebook_runtime_template_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. 
+ client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.CreateNotebookRuntimeTemplateRequest( + parent="parent_value", + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + client.create_notebook_runtime_template(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.CreateNotebookRuntimeTemplateRequest( + parent="parent_value", + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_notebook_runtime_template_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_notebook_runtime_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.CreateNotebookRuntimeTemplateRequest() + + +@pytest.mark.asyncio +async def test_create_notebook_runtime_template_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.CreateNotebookRuntimeTemplateRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.CreateNotebookRuntimeTemplateRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_notebook_runtime_template_async_from_dict(): + await test_create_notebook_runtime_template_async(request_type=dict) + + +def test_create_notebook_runtime_template_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = notebook_service.CreateNotebookRuntimeTemplateRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_notebook_runtime_template_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.CreateNotebookRuntimeTemplateRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_notebook_runtime_template_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_notebook_runtime_template( + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].notebook_runtime_template + mock_val = notebook_runtime.NotebookRuntimeTemplate(name="name_value") + assert arg == mock_val + arg = args[0].notebook_runtime_template_id + mock_val = "notebook_runtime_template_id_value" + assert arg == mock_val + + +def test_create_notebook_runtime_template_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_notebook_runtime_template( + notebook_service.CreateNotebookRuntimeTemplateRequest(), + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_notebook_runtime_template_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_notebook_runtime_template( + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].notebook_runtime_template + mock_val = notebook_runtime.NotebookRuntimeTemplate(name="name_value") + assert arg == mock_val + arg = args[0].notebook_runtime_template_id + mock_val = "notebook_runtime_template_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_notebook_runtime_template_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_notebook_runtime_template( + notebook_service.CreateNotebookRuntimeTemplateRequest(), + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.GetNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_get_notebook_runtime_template(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = notebook_runtime.NotebookRuntimeTemplate( + name="name_value", + display_name="display_name_value", + description="description_value", + is_default=True, + service_account="service_account_value", + etag="etag_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + response = client.get_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.GetNotebookRuntimeTemplateRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, notebook_runtime.NotebookRuntimeTemplate) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.is_default is True + assert response.service_account == "service_account_value" + assert response.etag == "etag_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +def test_get_notebook_runtime_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + client.get_notebook_runtime_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookRuntimeTemplateRequest() + + +def test_get_notebook_runtime_template_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.GetNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + client.get_notebook_runtime_template(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookRuntimeTemplateRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_template_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntimeTemplate( + name="name_value", + display_name="display_name_value", + description="description_value", + is_default=True, + service_account="service_account_value", + etag="etag_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + ) + response = await client.get_notebook_runtime_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookRuntimeTemplateRequest() + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_template_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.GetNotebookRuntimeTemplateRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntimeTemplate( + name="name_value", + display_name="display_name_value", + description="description_value", + is_default=True, + service_account="service_account_value", + etag="etag_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + ) + response = await client.get_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.GetNotebookRuntimeTemplateRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, notebook_runtime.NotebookRuntimeTemplate) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.is_default is True + assert response.service_account == "service_account_value" + assert response.etag == "etag_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_template_async_from_dict(): + await test_get_notebook_runtime_template_async(request_type=dict) + + +def test_get_notebook_runtime_template_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.GetNotebookRuntimeTemplateRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + call.return_value = notebook_runtime.NotebookRuntimeTemplate() + client.get_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_template_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.GetNotebookRuntimeTemplateRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntimeTemplate() + ) + await client.get_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_notebook_runtime_template_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_runtime.NotebookRuntimeTemplate() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_notebook_runtime_template( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_notebook_runtime_template_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_notebook_runtime_template( + notebook_service.GetNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_template_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_runtime.NotebookRuntimeTemplate() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntimeTemplate() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_notebook_runtime_template( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_template_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_notebook_runtime_template( + notebook_service.GetNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.ListNotebookRuntimeTemplatesRequest, + dict, + ], +) +def test_list_notebook_runtime_templates(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookRuntimeTemplatesResponse( + next_page_token="next_page_token_value", + ) + response = client.list_notebook_runtime_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.ListNotebookRuntimeTemplatesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotebookRuntimeTemplatesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_notebook_runtime_templates_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + client.list_notebook_runtime_templates() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookRuntimeTemplatesRequest() + + +def test_list_notebook_runtime_templates_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.ListNotebookRuntimeTemplatesRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + order_by="order_by_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + client.list_notebook_runtime_templates(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookRuntimeTemplatesRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + order_by="order_by_value", + ) + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimeTemplatesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_notebook_runtime_templates() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookRuntimeTemplatesRequest() + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.ListNotebookRuntimeTemplatesRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimeTemplatesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_notebook_runtime_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.ListNotebookRuntimeTemplatesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListNotebookRuntimeTemplatesAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_async_from_dict(): + await test_list_notebook_runtime_templates_async(request_type=dict) + + +def test_list_notebook_runtime_templates_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.ListNotebookRuntimeTemplatesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + call.return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + client.list_notebook_runtime_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.ListNotebookRuntimeTemplatesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimeTemplatesResponse() + ) + await client.list_notebook_runtime_templates(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_notebook_runtime_templates_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_notebook_runtime_templates( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_notebook_runtime_templates_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_notebook_runtime_templates( + notebook_service.ListNotebookRuntimeTemplatesRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimeTemplatesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_notebook_runtime_templates( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_notebook_runtime_templates( + notebook_service.ListNotebookRuntimeTemplatesRequest(), + parent="parent_value", + ) + + +def test_list_notebook_runtime_templates_pager(transport_name: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_notebook_runtime_templates(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, notebook_runtime.NotebookRuntimeTemplate) for i in results + ) + + +def test_list_notebook_runtime_templates_pages(transport_name: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + ), + RuntimeError, + ) + pages = list(client.list_notebook_runtime_templates(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_async_pager(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_notebook_runtime_templates( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all( + isinstance(i, notebook_runtime.NotebookRuntimeTemplate) for i in responses + ) + + +@pytest.mark.asyncio +async def test_list_notebook_runtime_templates_async_pages(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtime_templates), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_notebook_runtime_templates(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.DeleteNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_delete_notebook_runtime_template(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.DeleteNotebookRuntimeTemplateRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_notebook_runtime_template_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + client.delete_notebook_runtime_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookRuntimeTemplateRequest() + + +def test_delete_notebook_runtime_template_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.DeleteNotebookRuntimeTemplateRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + client.delete_notebook_runtime_template(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookRuntimeTemplateRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_template_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_notebook_runtime_template() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookRuntimeTemplateRequest() + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_template_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.DeleteNotebookRuntimeTemplateRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.DeleteNotebookRuntimeTemplateRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_template_async_from_dict(): + await test_delete_notebook_runtime_template_async(request_type=dict) + + +def test_delete_notebook_runtime_template_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.DeleteNotebookRuntimeTemplateRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_template_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = notebook_service.DeleteNotebookRuntimeTemplateRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_notebook_runtime_template(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_notebook_runtime_template_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_notebook_runtime_template( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_notebook_runtime_template_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_notebook_runtime_template( + notebook_service.DeleteNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_template_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime_template), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_notebook_runtime_template( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_template_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_notebook_runtime_template( + notebook_service.DeleteNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.AssignNotebookRuntimeRequest, + dict, + ], +) +def test_assign_notebook_runtime(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.assign_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.AssignNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_assign_notebook_runtime_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + client.assign_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.AssignNotebookRuntimeRequest() + + +def test_assign_notebook_runtime_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.AssignNotebookRuntimeRequest( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime_id="notebook_runtime_id_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + client.assign_notebook_runtime(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.AssignNotebookRuntimeRequest( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime_id="notebook_runtime_id_value", + ) + + +@pytest.mark.asyncio +async def test_assign_notebook_runtime_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.assign_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.AssignNotebookRuntimeRequest() + + +@pytest.mark.asyncio +async def test_assign_notebook_runtime_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.AssignNotebookRuntimeRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.assign_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.AssignNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_assign_notebook_runtime_async_from_dict(): + await test_assign_notebook_runtime_async(request_type=dict) + + +def test_assign_notebook_runtime_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.AssignNotebookRuntimeRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.assign_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_assign_notebook_runtime_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.AssignNotebookRuntimeRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.assign_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_assign_notebook_runtime_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.assign_notebook_runtime( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].notebook_runtime_template + mock_val = "notebook_runtime_template_value" + assert arg == mock_val + arg = args[0].notebook_runtime + mock_val = gca_notebook_runtime.NotebookRuntime(name="name_value") + assert arg == mock_val + arg = args[0].notebook_runtime_id + mock_val = "notebook_runtime_id_value" + assert arg == mock_val + + +def test_assign_notebook_runtime_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.assign_notebook_runtime( + notebook_service.AssignNotebookRuntimeRequest(), + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", + ) + + +@pytest.mark.asyncio +async def test_assign_notebook_runtime_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.assign_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.assign_notebook_runtime( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].notebook_runtime_template + mock_val = "notebook_runtime_template_value" + assert arg == mock_val + arg = args[0].notebook_runtime + mock_val = gca_notebook_runtime.NotebookRuntime(name="name_value") + assert arg == mock_val + arg = args[0].notebook_runtime_id + mock_val = "notebook_runtime_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_assign_notebook_runtime_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.assign_notebook_runtime( + notebook_service.AssignNotebookRuntimeRequest(), + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.GetNotebookRuntimeRequest, + dict, + ], +) +def test_get_notebook_runtime(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = notebook_runtime.NotebookRuntime( + name="name_value", + runtime_user="runtime_user_value", + proxy_uri="proxy_uri_value", + health_state=notebook_runtime.NotebookRuntime.HealthState.HEALTHY, + display_name="display_name_value", + description="description_value", + service_account="service_account_value", + runtime_state=notebook_runtime.NotebookRuntime.RuntimeState.RUNNING, + is_upgradable=True, + version="version_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + response = client.get_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.GetNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, notebook_runtime.NotebookRuntime) + assert response.name == "name_value" + assert response.runtime_user == "runtime_user_value" + assert response.proxy_uri == "proxy_uri_value" + assert response.health_state == notebook_runtime.NotebookRuntime.HealthState.HEALTHY + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.service_account == "service_account_value" + assert ( + response.runtime_state == notebook_runtime.NotebookRuntime.RuntimeState.RUNNING + ) + assert response.is_upgradable is True + assert response.version == "version_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +def test_get_notebook_runtime_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + client.get_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookRuntimeRequest() + + +def test_get_notebook_runtime_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.GetNotebookRuntimeRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + client.get_notebook_runtime(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookRuntimeRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntime( + name="name_value", + runtime_user="runtime_user_value", + proxy_uri="proxy_uri_value", + health_state=notebook_runtime.NotebookRuntime.HealthState.HEALTHY, + display_name="display_name_value", + description="description_value", + service_account="service_account_value", + runtime_state=notebook_runtime.NotebookRuntime.RuntimeState.RUNNING, + is_upgradable=True, + version="version_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + ) + response = await client.get_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.GetNotebookRuntimeRequest() + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.GetNotebookRuntimeRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntime( + name="name_value", + runtime_user="runtime_user_value", + proxy_uri="proxy_uri_value", + health_state=notebook_runtime.NotebookRuntime.HealthState.HEALTHY, + display_name="display_name_value", + description="description_value", + service_account="service_account_value", + runtime_state=notebook_runtime.NotebookRuntime.RuntimeState.RUNNING, + is_upgradable=True, + version="version_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + ) + response = await client.get_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.GetNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, notebook_runtime.NotebookRuntime) + assert response.name == "name_value" + assert response.runtime_user == "runtime_user_value" + assert response.proxy_uri == "proxy_uri_value" + assert response.health_state == notebook_runtime.NotebookRuntime.HealthState.HEALTHY + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.service_account == "service_account_value" + assert ( + response.runtime_state == notebook_runtime.NotebookRuntime.RuntimeState.RUNNING + ) + assert response.is_upgradable is True + assert response.version == "version_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_async_from_dict(): + await test_get_notebook_runtime_async(request_type=dict) + + +def test_get_notebook_runtime_field_headers(): + client = NotebookServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.GetNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + call.return_value = notebook_runtime.NotebookRuntime() + client.get_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.GetNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntime() + ) + await client.get_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_notebook_runtime_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_runtime.NotebookRuntime() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_notebook_runtime_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_notebook_runtime( + notebook_service.GetNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_runtime.NotebookRuntime() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_runtime.NotebookRuntime() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.get_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_notebook_runtime_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_notebook_runtime( + notebook_service.GetNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.ListNotebookRuntimesRequest, + dict, + ], +) +def test_list_notebook_runtimes(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookRuntimesResponse( + next_page_token="next_page_token_value", + ) + response = client.list_notebook_runtimes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.ListNotebookRuntimesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListNotebookRuntimesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_notebook_runtimes_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + client.list_notebook_runtimes() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookRuntimesRequest() + + +def test_list_notebook_runtimes_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.ListNotebookRuntimesRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + order_by="order_by_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + client.list_notebook_runtimes(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookRuntimesRequest( + parent="parent_value", + filter="filter_value", + page_token="page_token_value", + order_by="order_by_value", + ) + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_notebook_runtimes() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.ListNotebookRuntimesRequest() + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.ListNotebookRuntimesRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_notebook_runtimes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.ListNotebookRuntimesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotebookRuntimesAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_async_from_dict(): + await test_list_notebook_runtimes_async(request_type=dict) + + +def test_list_notebook_runtimes_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.ListNotebookRuntimesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + call.return_value = notebook_service.ListNotebookRuntimesResponse() + client.list_notebook_runtimes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.ListNotebookRuntimesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimesResponse() + ) + await client.list_notebook_runtimes(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_notebook_runtimes_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookRuntimesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_notebook_runtimes( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_notebook_runtimes_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_notebook_runtimes( + notebook_service.ListNotebookRuntimesRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = notebook_service.ListNotebookRuntimesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + notebook_service.ListNotebookRuntimesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_notebook_runtimes( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.list_notebook_runtimes( + notebook_service.ListNotebookRuntimesRequest(), + parent="parent_value", + ) + + +def test_list_notebook_runtimes_pager(transport_name: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Set the response to a series of pages. + call.side_effect = ( + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_notebook_runtimes(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, notebook_runtime.NotebookRuntime) for i in results) + + +def test_list_notebook_runtimes_pages(transport_name: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), "__call__" + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + ), + RuntimeError, + ) + pages = list(client.list_notebook_runtimes(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_async_pager(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_notebook_runtimes( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, notebook_runtime.NotebookRuntime) for i in responses) + + +@pytest.mark.asyncio +async def test_list_notebook_runtimes_async_pages(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_notebook_runtimes), + "__call__", + new_callable=mock.AsyncMock, + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_notebook_runtimes(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.DeleteNotebookRuntimeRequest, + dict, + ], +) +def test_delete_notebook_runtime(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.DeleteNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_notebook_runtime_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + client.delete_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookRuntimeRequest() + + +def test_delete_notebook_runtime_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.DeleteNotebookRuntimeRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + client.delete_notebook_runtime(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookRuntimeRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.DeleteNotebookRuntimeRequest() + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.DeleteNotebookRuntimeRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.DeleteNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_async_from_dict(): + await test_delete_notebook_runtime_async(request_type=dict) + + +def test_delete_notebook_runtime_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.DeleteNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = notebook_service.DeleteNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_notebook_runtime_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_notebook_runtime_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_notebook_runtime( + notebook_service.DeleteNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_notebook_runtime_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_notebook_runtime( + notebook_service.DeleteNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.UpgradeNotebookRuntimeRequest, + dict, + ], +) +def test_upgrade_notebook_runtime(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.upgrade_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.UpgradeNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_upgrade_notebook_runtime_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + client.upgrade_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.UpgradeNotebookRuntimeRequest() + + +def test_upgrade_notebook_runtime_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.UpgradeNotebookRuntimeRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + client.upgrade_notebook_runtime(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.UpgradeNotebookRuntimeRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_upgrade_notebook_runtime_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.upgrade_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.UpgradeNotebookRuntimeRequest() + + +@pytest.mark.asyncio +async def test_upgrade_notebook_runtime_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.UpgradeNotebookRuntimeRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.upgrade_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.UpgradeNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_upgrade_notebook_runtime_async_from_dict(): + await test_upgrade_notebook_runtime_async(request_type=dict) + + +def test_upgrade_notebook_runtime_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = notebook_service.UpgradeNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.upgrade_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_upgrade_notebook_runtime_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.UpgradeNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.upgrade_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_upgrade_notebook_runtime_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.upgrade_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_upgrade_notebook_runtime_flattened_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.upgrade_notebook_runtime( + notebook_service.UpgradeNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_upgrade_notebook_runtime_flattened_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.upgrade_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.upgrade_notebook_runtime( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_upgrade_notebook_runtime_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.upgrade_notebook_runtime( + notebook_service.UpgradeNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.StartNotebookRuntimeRequest, + dict, + ], +) +def test_start_notebook_runtime(request_type, transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.start_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = notebook_service.StartNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_start_notebook_runtime_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + client.start_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.StartNotebookRuntimeRequest() + + +def test_start_notebook_runtime_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = notebook_service.StartNotebookRuntimeRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + client.start_notebook_runtime(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.StartNotebookRuntimeRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_start_notebook_runtime_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.start_notebook_runtime() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == notebook_service.StartNotebookRuntimeRequest() + + +@pytest.mark.asyncio +async def test_start_notebook_runtime_async( + transport: str = "grpc_asyncio", + request_type=notebook_service.StartNotebookRuntimeRequest, +): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.start_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = notebook_service.StartNotebookRuntimeRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_start_notebook_runtime_async_from_dict(): + await test_start_notebook_runtime_async(request_type=dict) + + +def test_start_notebook_runtime_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = notebook_service.StartNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.start_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_start_notebook_runtime_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = notebook_service.StartNotebookRuntimeRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_notebook_runtime), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.start_notebook_runtime(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_start_notebook_runtime_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+        type(client.transport.start_notebook_runtime), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name="operations/op")
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.start_notebook_runtime(
+            name="name_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = "name_value"
+        assert arg == mock_val
+
+
+def test_start_notebook_runtime_flattened_error():
+    client = NotebookServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.start_notebook_runtime(
+            notebook_service.StartNotebookRuntimeRequest(),
+            name="name_value",
+        )
+
+
+@pytest.mark.asyncio
+async def test_start_notebook_runtime_flattened_async():
+    """Flattened (keyword-argument) call works on the async client."""
+    client = NotebookServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.start_notebook_runtime), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        # The async client awaits the stub, so the return value must be an
+        # awaitable FakeUnaryUnaryCall (a plain Operation would not await).
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name="operations/spam")
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.start_notebook_runtime(
+            name="name_value",
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_start_notebook_runtime_flattened_error_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.start_notebook_runtime( + notebook_service.StartNotebookRuntimeRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.CreateNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_create_notebook_runtime_template_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["notebook_runtime_template"] = { + "name": "name_value", + "display_name": "display_name_value", + "description": "description_value", + "is_default": True, + "machine_spec": { + "machine_type": "machine_type_value", + "accelerator_type": 1, + "accelerator_count": 1805, + "tpu_topology": "tpu_topology_value", + }, + "data_persistent_disk_spec": { + "disk_type": "disk_type_value", + "disk_size_gb": 1261, + }, + "network_spec": { + "enable_internet_access": True, + "network": "network_value", + "subnetwork": "subnetwork_value", + }, + "service_account": "service_account_value", + "etag": "etag_value", + "labels": {}, + "idle_shutdown_config": { + "idle_timeout": {"seconds": 751, "nanos": 543}, + "idle_shutdown_disabled": True, + }, + "euc_config": {"euc_disabled": True, "bypass_actas_check": True}, + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + "notebook_runtime_type": 1, + "shielded_vm_config": {"enable_secure_boot": True}, + "network_tags": 
["network_tags_value1", "network_tags_value2"], + } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = notebook_service.CreateNotebookRuntimeTemplateRequest.meta.fields[ + "notebook_runtime_template" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init[ + "notebook_runtime_template" + ].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + 
subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range( + 0, len(request_init["notebook_runtime_template"][field]) + ): + del request_init["notebook_runtime_template"][field][i][subfield] + else: + del request_init["notebook_runtime_template"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_notebook_runtime_template(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_create_notebook_runtime_template_rest_required_fields( + request_type=notebook_service.CreateNotebookRuntimeTemplateRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_notebook_runtime_template._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("notebook_runtime_template_id",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+        with mock.patch.object(path_template, "transcode") as transcode:
+            # A uri without fields and an empty body will force all the
+            # request fields to show up in the query_params.
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                "uri": "v1/sample_method",
+                "method": "post",
+                "query_params": pb_request,
+            }
+            transcode_result["body"] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode("UTF-8")
+            req.return_value = response_value
+
+            response = client.create_notebook_runtime_template(request)
+
+            expected_params = []
+            actual_params = req.call_args.kwargs["params"]
+            assert expected_params == actual_params
+
+
+def test_create_notebook_runtime_template_rest_unset_required_fields():
+    """Check the unset-required-fields computation for the create RPC."""
+    transport = transports.NotebookServiceRestTransport(
+        # Instantiate the credentials; passing the bare class only worked by
+        # accident because this test never performs network I/O.
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    unset_fields = (
+        transport.create_notebook_runtime_template._get_unset_required_fields({})
+    )
+    assert set(unset_fields) == (
+        set(("notebookRuntimeTemplateId",))
+        & set(
+            (
+                "parent",
+                "notebookRuntimeTemplate",
+            )
+        )
+    )
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_notebook_runtime_template_rest_interceptors(null_interceptor):
+    transport = transports.NotebookServiceRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None
+        if null_interceptor
+        else transports.NotebookServiceRestInterceptor(),
+    )
+    client = NotebookServiceClient(transport=transport)
+    with mock.patch.object(
+        type(client.transport._session), "request"
+    ) as req, mock.patch.object(
+        path_template, "transcode"
+    ) as transcode, mock.patch.object(
+        operation.Operation, "_set_result_from_operation"
+    ), mock.patch.object(
+        transports.NotebookServiceRestInterceptor,
+        "post_create_notebook_runtime_template",
+    ) as post,
mock.patch.object( + transports.NotebookServiceRestInterceptor, + "pre_create_notebook_runtime_template", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.CreateNotebookRuntimeTemplateRequest.pb( + notebook_service.CreateNotebookRuntimeTemplateRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.CreateNotebookRuntimeTemplateRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_notebook_runtime_template( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_notebook_runtime_template_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.CreateNotebookRuntimeTemplateRequest, +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_notebook_runtime_template(request) + + +def test_create_notebook_runtime_template_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_notebook_runtime_template(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*}/notebookRuntimeTemplates" + % client.transport._host, + args[1], + ) + + +def test_create_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_notebook_runtime_template( + notebook_service.CreateNotebookRuntimeTemplateRequest(), + parent="parent_value", + notebook_runtime_template=notebook_runtime.NotebookRuntimeTemplate( + name="name_value" + ), + notebook_runtime_template_id="notebook_runtime_template_id_value", + ) + + +def test_create_notebook_runtime_template_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.GetNotebookRuntimeTemplateRequest, + dict, + ], +) +def test_get_notebook_runtime_template_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = notebook_runtime.NotebookRuntimeTemplate( + name="name_value", + display_name="display_name_value", + description="description_value", + is_default=True, + service_account="service_account_value", + etag="etag_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_notebook_runtime_template(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, notebook_runtime.NotebookRuntimeTemplate) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.is_default is True + assert response.service_account == "service_account_value" + assert response.etag == "etag_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +def test_get_notebook_runtime_template_rest_required_fields( + request_type=notebook_service.GetNotebookRuntimeTemplateRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + 
).get_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = notebook_runtime.NotebookRuntimeTemplate() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                "uri": "v1/sample_method",
+                "method": "get",
+                "query_params": pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode("UTF-8")
+            req.return_value = response_value
+
+            response = client.get_notebook_runtime_template(request)
+
+            expected_params = []
+            actual_params = req.call_args.kwargs["params"]
+            assert expected_params == actual_params
+
+
+def test_get_notebook_runtime_template_rest_unset_required_fields():
+    """Check the unset-required-fields computation for the get RPC."""
+    transport = transports.NotebookServiceRestTransport(
+        # Instantiate the credentials; passing the bare class only worked by
+        # accident because this test never performs network I/O.
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    unset_fields = transport.get_notebook_runtime_template._get_unset_required_fields(
+        {}
+    )
+    assert set(unset_fields) == (set(()) & set(("name",)))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_notebook_runtime_template_rest_interceptors(null_interceptor):
+    transport = transports.NotebookServiceRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None
+        if null_interceptor
+        else transports.NotebookServiceRestInterceptor(),
+    )
+    client = NotebookServiceClient(transport=transport)
+    with mock.patch.object(
+        type(client.transport._session), "request"
+    ) as req, mock.patch.object(
+        path_template, "transcode"
+    ) as transcode, mock.patch.object(
+        transports.NotebookServiceRestInterceptor, "post_get_notebook_runtime_template"
+    ) as post, mock.patch.object(
+        transports.NotebookServiceRestInterceptor, "pre_get_notebook_runtime_template"
+    ) as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = notebook_service.GetNotebookRuntimeTemplateRequest.pb(
notebook_service.GetNotebookRuntimeTemplateRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = notebook_runtime.NotebookRuntimeTemplate.to_json( + notebook_runtime.NotebookRuntimeTemplate() + ) + + request = notebook_service.GetNotebookRuntimeTemplateRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = notebook_runtime.NotebookRuntimeTemplate() + + client.get_notebook_runtime_template( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_notebook_runtime_template_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.GetNotebookRuntimeTemplateRequest, +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_notebook_runtime_template(request) + + +def test_get_notebook_runtime_template_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_runtime.NotebookRuntimeTemplate() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntimeTemplate.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_notebook_runtime_template(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}" + % client.transport._host, + args[1], + ) + + +def test_get_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_notebook_runtime_template( + notebook_service.GetNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +def test_get_notebook_runtime_template_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.ListNotebookRuntimeTemplatesRequest, + dict, + ], +) +def test_list_notebook_runtime_templates_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_notebook_runtime_templates(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListNotebookRuntimeTemplatesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_notebook_runtime_templates_rest_required_fields( + request_type=notebook_service.ListNotebookRuntimeTemplatesRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_notebook_runtime_templates._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_notebook_runtime_templates._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + "read_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + # Mock the http request call within the method and fake a response. 
+    with mock.patch.object(Session, "request") as req:
+        # We need to mock transcode() because providing default values
+        # for required fields will fail the real version if the http_options
+        # expect actual values for those fields.
+        with mock.patch.object(path_template, "transcode") as transcode:
+            # A uri without fields and an empty body will force all the
+            # request fields to show up in the query_params.
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                "uri": "v1/sample_method",
+                "method": "get",
+                "query_params": pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = notebook_service.ListNotebookRuntimeTemplatesResponse.pb(
+                return_value
+            )
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode("UTF-8")
+            req.return_value = response_value
+
+            response = client.list_notebook_runtime_templates(request)
+
+            expected_params = []
+            actual_params = req.call_args.kwargs["params"]
+            assert expected_params == actual_params
+
+
+def test_list_notebook_runtime_templates_rest_unset_required_fields():
+    """Check the unset-required-fields computation for the list RPC."""
+    transport = transports.NotebookServiceRestTransport(
+        # Instantiate the credentials; passing the bare class only worked by
+        # accident because this test never performs network I/O.
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    unset_fields = transport.list_notebook_runtime_templates._get_unset_required_fields(
+        {}
+    )
+    assert set(unset_fields) == (
+        set(
+            (
+                "filter",
+                "orderBy",
+                "pageSize",
+                "pageToken",
+                "readMask",
+            )
+        )
+        & set(("parent",))
+    )
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_notebook_runtime_templates_rest_interceptors(null_interceptor):
+    transport = transports.NotebookServiceRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None
+        if null_interceptor
+        else transports.NotebookServiceRestInterceptor(),
+    )
+    client = NotebookServiceClient(transport=transport)
+    with
mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.NotebookServiceRestInterceptor, + "post_list_notebook_runtime_templates", + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_list_notebook_runtime_templates" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.ListNotebookRuntimeTemplatesRequest.pb( + notebook_service.ListNotebookRuntimeTemplatesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + notebook_service.ListNotebookRuntimeTemplatesResponse.to_json( + notebook_service.ListNotebookRuntimeTemplatesResponse() + ) + ) + + request = notebook_service.ListNotebookRuntimeTemplatesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + + client.list_notebook_runtime_templates( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_notebook_runtime_templates_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.ListNotebookRuntimeTemplatesRequest, +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_notebook_runtime_templates(request) + + +def test_list_notebook_runtime_templates_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimeTemplatesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_notebook_runtime_templates(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*}/notebookRuntimeTemplates" + % client.transport._host, + args[1], + ) + + +def test_list_notebook_runtime_templates_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_notebook_runtime_templates( + notebook_service.ListNotebookRuntimeTemplatesRequest(), + parent="parent_value", + ) + + +def test_list_notebook_runtime_templates_rest_pager(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimeTemplatesResponse( + notebook_runtime_templates=[ + notebook_runtime.NotebookRuntimeTemplate(), + notebook_runtime.NotebookRuntimeTemplate(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + notebook_service.ListNotebookRuntimeTemplatesResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_notebook_runtime_templates(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, notebook_runtime.NotebookRuntimeTemplate) for i in results + ) + + pages = list( + client.list_notebook_runtime_templates(request=sample_request).pages + ) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.DeleteNotebookRuntimeTemplateRequest, + dict, + ], +) +def 
test_delete_notebook_runtime_template_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_notebook_runtime_template(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_delete_notebook_runtime_template_rest_required_fields( + request_type=notebook_service.DeleteNotebookRuntimeTemplateRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_notebook_runtime_template._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_notebook_runtime_template(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_notebook_runtime_template_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = ( + transport.delete_notebook_runtime_template._get_unset_required_fields({}) + ) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_notebook_runtime_template_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, + "post_delete_notebook_runtime_template", + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, + "pre_delete_notebook_runtime_template", + ) as pre: + 
pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.DeleteNotebookRuntimeTemplateRequest.pb( + notebook_service.DeleteNotebookRuntimeTemplateRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.DeleteNotebookRuntimeTemplateRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_notebook_runtime_template( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_notebook_runtime_template_rest_bad_request( + transport: str = "rest", + request_type=notebook_service.DeleteNotebookRuntimeTemplateRequest, +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_notebook_runtime_template(request) + + +def test_delete_notebook_runtime_template_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimeTemplates/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_notebook_runtime_template(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/notebookRuntimeTemplates/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_notebook_runtime_template_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_notebook_runtime_template( + notebook_service.DeleteNotebookRuntimeTemplateRequest(), + name="name_value", + ) + + +def test_delete_notebook_runtime_template_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.AssignNotebookRuntimeRequest, + dict, + ], +) +def test_assign_notebook_runtime_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.assign_notebook_runtime(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_assign_notebook_runtime_rest_required_fields( + request_type=notebook_service.AssignNotebookRuntimeRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["notebook_runtime_template"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).assign_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["notebookRuntimeTemplate"] = "notebook_runtime_template_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).assign_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "notebookRuntimeTemplate" in jsonified_request + assert ( + jsonified_request["notebookRuntimeTemplate"] + == "notebook_runtime_template_value" + ) + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.assign_notebook_runtime(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_assign_notebook_runtime_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.assign_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "notebookRuntimeTemplate", + "notebookRuntime", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_assign_notebook_runtime_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + 
operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_assign_notebook_runtime" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_assign_notebook_runtime" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.AssignNotebookRuntimeRequest.pb( + notebook_service.AssignNotebookRuntimeRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.AssignNotebookRuntimeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.assign_notebook_runtime( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_assign_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.AssignNotebookRuntimeRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.assign_notebook_runtime(request) + + +def test_assign_notebook_runtime_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.assign_notebook_runtime(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*}/notebookRuntimes:assign" + % client.transport._host, + args[1], + ) + + +def test_assign_notebook_runtime_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.assign_notebook_runtime( + notebook_service.AssignNotebookRuntimeRequest(), + parent="parent_value", + notebook_runtime_template="notebook_runtime_template_value", + notebook_runtime=gca_notebook_runtime.NotebookRuntime(name="name_value"), + notebook_runtime_id="notebook_runtime_id_value", + ) + + +def test_assign_notebook_runtime_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.GetNotebookRuntimeRequest, + dict, + ], +) +def test_get_notebook_runtime_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = notebook_runtime.NotebookRuntime( + name="name_value", + runtime_user="runtime_user_value", + proxy_uri="proxy_uri_value", + health_state=notebook_runtime.NotebookRuntime.HealthState.HEALTHY, + display_name="display_name_value", + description="description_value", + service_account="service_account_value", + runtime_state=notebook_runtime.NotebookRuntime.RuntimeState.RUNNING, + is_upgradable=True, + version="version_value", + notebook_runtime_type=notebook_runtime.NotebookRuntimeType.USER_DEFINED, + network_tags=["network_tags_value"], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntime.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_notebook_runtime(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, notebook_runtime.NotebookRuntime) + assert response.name == "name_value" + assert response.runtime_user == "runtime_user_value" + assert response.proxy_uri == "proxy_uri_value" + assert response.health_state == notebook_runtime.NotebookRuntime.HealthState.HEALTHY + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.service_account == "service_account_value" + assert ( + response.runtime_state == notebook_runtime.NotebookRuntime.RuntimeState.RUNNING + ) + assert response.is_upgradable is True + assert response.version == "version_value" + assert ( + response.notebook_runtime_type + == notebook_runtime.NotebookRuntimeType.USER_DEFINED + ) + assert response.network_tags == ["network_tags_value"] + + +def test_get_notebook_runtime_rest_required_fields( + request_type=notebook_service.GetNotebookRuntimeRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = notebook_runtime.NotebookRuntime() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntime.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_notebook_runtime(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_notebook_runtime_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_notebook_runtime_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if 
null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_get_notebook_runtime" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_get_notebook_runtime" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.GetNotebookRuntimeRequest.pb( + notebook_service.GetNotebookRuntimeRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = notebook_runtime.NotebookRuntime.to_json( + notebook_runtime.NotebookRuntime() + ) + + request = notebook_service.GetNotebookRuntimeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = notebook_runtime.NotebookRuntime() + + client.get_notebook_runtime( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.GetNotebookRuntimeRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_notebook_runtime(request) + + +def test_get_notebook_runtime_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_runtime.NotebookRuntime() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_runtime.NotebookRuntime.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_notebook_runtime(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}" + % client.transport._host, + args[1], + ) + + +def test_get_notebook_runtime_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_notebook_runtime( + notebook_service.GetNotebookRuntimeRequest(), + name="name_value", + ) + + +def test_get_notebook_runtime_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.ListNotebookRuntimesRequest, + dict, + ], +) +def test_list_notebook_runtimes_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = notebook_service.ListNotebookRuntimesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_notebook_runtimes(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNotebookRuntimesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_notebook_runtimes_rest_required_fields( + request_type=notebook_service.ListNotebookRuntimesRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_notebook_runtimes._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_notebook_runtimes._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "filter", + "order_by", + "page_size", + "page_token", + "read_mask", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = notebook_service.ListNotebookRuntimesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimesResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_notebook_runtimes(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_notebook_runtimes_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_notebook_runtimes._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "filter", + "orderBy", + "pageSize", + "pageToken", + "readMask", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_notebook_runtimes_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_list_notebook_runtimes" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_list_notebook_runtimes" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.ListNotebookRuntimesRequest.pb( + 
notebook_service.ListNotebookRuntimesRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + notebook_service.ListNotebookRuntimesResponse.to_json( + notebook_service.ListNotebookRuntimesResponse() + ) + ) + + request = notebook_service.ListNotebookRuntimesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = notebook_service.ListNotebookRuntimesResponse() + + client.list_notebook_runtimes( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_notebook_runtimes_rest_bad_request( + transport: str = "rest", request_type=notebook_service.ListNotebookRuntimesRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_notebook_runtimes(request) + + +def test_list_notebook_runtimes_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = notebook_service.ListNotebookRuntimesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = notebook_service.ListNotebookRuntimesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_notebook_runtimes(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*}/notebookRuntimes" + % client.transport._host, + args[1], + ) + + +def test_list_notebook_runtimes_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_notebook_runtimes( + notebook_service.ListNotebookRuntimesRequest(), + parent="parent_value", + ) + + +def test_list_notebook_runtimes_rest_pager(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + next_page_token="abc", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[], + next_page_token="def", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + ], + next_page_token="ghi", + ), + notebook_service.ListNotebookRuntimesResponse( + notebook_runtimes=[ + notebook_runtime.NotebookRuntime(), + notebook_runtime.NotebookRuntime(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + notebook_service.ListNotebookRuntimesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_notebook_runtimes(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, notebook_runtime.NotebookRuntime) for i in results) + + pages = list(client.list_notebook_runtimes(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.DeleteNotebookRuntimeRequest, + dict, + ], +) +def test_delete_notebook_runtime_rest(request_type): + client = NotebookServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_notebook_runtime(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_delete_notebook_runtime_rest_required_fields( + request_type=notebook_service.DeleteNotebookRuntimeRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required 
fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_notebook_runtime(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_notebook_runtime_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_notebook_runtime_rest_interceptors(null_interceptor): + transport = 
transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_delete_notebook_runtime" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_delete_notebook_runtime" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.DeleteNotebookRuntimeRequest.pb( + notebook_service.DeleteNotebookRuntimeRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.DeleteNotebookRuntimeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_notebook_runtime( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.DeleteNotebookRuntimeRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = 
request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_notebook_runtime(request) + + +def test_delete_notebook_runtime_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_notebook_runtime(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_notebook_runtime_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_notebook_runtime( + notebook_service.DeleteNotebookRuntimeRequest(), + name="name_value", + ) + + +def test_delete_notebook_runtime_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.UpgradeNotebookRuntimeRequest, + dict, + ], +) +def test_upgrade_notebook_runtime_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.upgrade_notebook_runtime(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_upgrade_notebook_runtime_rest_required_fields( + request_type=notebook_service.UpgradeNotebookRuntimeRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).upgrade_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).upgrade_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.upgrade_notebook_runtime(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_upgrade_notebook_runtime_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.upgrade_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_upgrade_notebook_runtime_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_upgrade_notebook_runtime" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_upgrade_notebook_runtime" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.UpgradeNotebookRuntimeRequest.pb( + notebook_service.UpgradeNotebookRuntimeRequest() + ) + transcode.return_value = { + 
"method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.UpgradeNotebookRuntimeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.upgrade_notebook_runtime( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_upgrade_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.UpgradeNotebookRuntimeRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.upgrade_notebook_runtime(request) + + +def test_upgrade_notebook_runtime_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.upgrade_notebook_runtime(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}:upgrade" + % client.transport._host, + args[1], + ) + + +def test_upgrade_notebook_runtime_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.upgrade_notebook_runtime( + notebook_service.UpgradeNotebookRuntimeRequest(), + name="name_value", + ) + + +def test_upgrade_notebook_runtime_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + notebook_service.StartNotebookRuntimeRequest, + dict, + ], +) +def test_start_notebook_runtime_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.start_notebook_runtime(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_start_notebook_runtime_rest_required_fields( + request_type=notebook_service.StartNotebookRuntimeRequest, +): + transport_class = transports.NotebookServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).start_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).start_notebook_runtime._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.start_notebook_runtime(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_start_notebook_runtime_rest_unset_required_fields(): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.start_notebook_runtime._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_start_notebook_runtime_rest_interceptors(null_interceptor): + transport = transports.NotebookServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.NotebookServiceRestInterceptor(), + ) + client = NotebookServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.NotebookServiceRestInterceptor, "post_start_notebook_runtime" + ) as post, mock.patch.object( + transports.NotebookServiceRestInterceptor, "pre_start_notebook_runtime" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = notebook_service.StartNotebookRuntimeRequest.pb( + notebook_service.StartNotebookRuntimeRequest() + ) + transcode.return_value = { + "method": "post", + 
"uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = notebook_service.StartNotebookRuntimeRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.start_notebook_runtime( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_start_notebook_runtime_rest_bad_request( + transport: str = "rest", request_type=notebook_service.StartNotebookRuntimeRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.start_notebook_runtime(request) + + +def test_start_notebook_runtime_rest_flattened(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/notebookRuntimes/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.start_notebook_runtime(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/notebookRuntimes/*}:start" + % client.transport._host, + args[1], + ) + + +def test_start_notebook_runtime_rest_flattened_error(transport: str = "rest"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.start_notebook_runtime( + notebook_service.StartNotebookRuntimeRequest(), + name="name_value", + ) + + +def test_start_notebook_runtime_rest_error(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.NotebookServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.NotebookServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NotebookServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.NotebookServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = NotebookServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = NotebookServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.NotebookServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NotebookServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.NotebookServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = NotebookServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.NotebookServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.NotebookServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceGrpcAsyncIOTransport, + transports.NotebookServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = NotebookServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.NotebookServiceGrpcTransport, + ) + + +def test_notebook_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.NotebookServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_notebook_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.aiplatform_v1beta1.services.notebook_service.transports.NotebookServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.NotebookServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "create_notebook_runtime_template", + "get_notebook_runtime_template", + "list_notebook_runtime_templates", + "delete_notebook_runtime_template", + "assign_notebook_runtime", + "get_notebook_runtime", + "list_notebook_runtimes", + "delete_notebook_runtime", + "upgrade_notebook_runtime", + "start_notebook_runtime", + "set_iam_policy", + "get_iam_policy", + "test_iam_permissions", + "get_location", + "list_locations", + "get_operation", + "wait_operation", + "cancel_operation", + "delete_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_notebook_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.notebook_service.transports.NotebookServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NotebookServiceTransport( + 
credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_notebook_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.notebook_service.transports.NotebookServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NotebookServiceTransport() + adc.assert_called_once() + + +def test_notebook_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + NotebookServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceGrpcAsyncIOTransport, + ], +) +def test_notebook_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceGrpcAsyncIOTransport, + transports.NotebookServiceRestTransport, + ], +) +def test_notebook_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.NotebookServiceGrpcTransport, grpc_helpers), + (transports.NotebookServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_notebook_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceGrpcAsyncIOTransport, + ], +) +def test_notebook_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_notebook_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.NotebookServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_notebook_service_rest_lro_client(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_notebook_service_host_no_port(transport_name): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_notebook_service_host_with_port(transport_name): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_notebook_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = NotebookServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = NotebookServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_notebook_runtime_template._session + session2 = client2.transport.create_notebook_runtime_template._session + assert session1 != session2 + session1 = client1.transport.get_notebook_runtime_template._session + session2 = client2.transport.get_notebook_runtime_template._session + assert session1 != session2 + session1 = 
client1.transport.list_notebook_runtime_templates._session + session2 = client2.transport.list_notebook_runtime_templates._session + assert session1 != session2 + session1 = client1.transport.delete_notebook_runtime_template._session + session2 = client2.transport.delete_notebook_runtime_template._session + assert session1 != session2 + session1 = client1.transport.assign_notebook_runtime._session + session2 = client2.transport.assign_notebook_runtime._session + assert session1 != session2 + session1 = client1.transport.get_notebook_runtime._session + session2 = client2.transport.get_notebook_runtime._session + assert session1 != session2 + session1 = client1.transport.list_notebook_runtimes._session + session2 = client2.transport.list_notebook_runtimes._session + assert session1 != session2 + session1 = client1.transport.delete_notebook_runtime._session + session2 = client2.transport.delete_notebook_runtime._session + assert session1 != session2 + session1 = client1.transport.upgrade_notebook_runtime._session + session2 = client2.transport.upgrade_notebook_runtime._session + assert session1 != session2 + session1 = client1.transport.start_notebook_runtime._session + session2 = client2.transport.start_notebook_runtime._session + assert session1 != session2 + + +def test_notebook_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.NotebookServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_notebook_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.NotebookServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceGrpcAsyncIOTransport, + ], +) +def test_notebook_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, 
client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize( + "transport_class", + [ + transports.NotebookServiceGrpcTransport, + transports.NotebookServiceGrpcAsyncIOTransport, + ], +) +def test_notebook_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_notebook_service_grpc_lro_client(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_notebook_service_grpc_lro_async_client(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_network_path(): + project = "squid" + network = "clam" + expected = "projects/{project}/global/networks/{network}".format( + project=project, + network=network, + ) + actual = NotebookServiceClient.network_path(project, network) + assert expected == actual + + +def test_parse_network_path(): + expected = { + "project": "whelk", + "network": "octopus", + } + path = NotebookServiceClient.network_path(**expected) + + # Check that the path construction is reversible. + actual = NotebookServiceClient.parse_network_path(path) + assert expected == actual + + +def test_notebook_runtime_path(): + project = "oyster" + location = "nudibranch" + notebook_runtime = "cuttlefish" + expected = "projects/{project}/locations/{location}/notebookRuntimes/{notebook_runtime}".format( + project=project, + location=location, + notebook_runtime=notebook_runtime, + ) + actual = NotebookServiceClient.notebook_runtime_path( + project, location, notebook_runtime + ) + assert expected == actual + + +def test_parse_notebook_runtime_path(): + expected = { + "project": "mussel", + "location": "winkle", + "notebook_runtime": "nautilus", + } + path = NotebookServiceClient.notebook_runtime_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NotebookServiceClient.parse_notebook_runtime_path(path) + assert expected == actual + + +def test_notebook_runtime_template_path(): + project = "scallop" + location = "abalone" + notebook_runtime_template = "squid" + expected = "projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}".format( + project=project, + location=location, + notebook_runtime_template=notebook_runtime_template, + ) + actual = NotebookServiceClient.notebook_runtime_template_path( + project, location, notebook_runtime_template + ) + assert expected == actual + + +def test_parse_notebook_runtime_template_path(): + expected = { + "project": "clam", + "location": "whelk", + "notebook_runtime_template": "octopus", + } + path = NotebookServiceClient.notebook_runtime_template_path(**expected) + + # Check that the path construction is reversible. + actual = NotebookServiceClient.parse_notebook_runtime_template_path(path) + assert expected == actual + + +def test_subnetwork_path(): + project = "oyster" + region = "nudibranch" + subnetwork = "cuttlefish" + expected = "projects/{project}/regions/{region}/subnetworks/{subnetwork}".format( + project=project, + region=region, + subnetwork=subnetwork, + ) + actual = NotebookServiceClient.subnetwork_path(project, region, subnetwork) + assert expected == actual + + +def test_parse_subnetwork_path(): + expected = { + "project": "mussel", + "region": "winkle", + "subnetwork": "nautilus", + } + path = NotebookServiceClient.subnetwork_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NotebookServiceClient.parse_subnetwork_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "scallop" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = NotebookServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = NotebookServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = NotebookServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "squid" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = NotebookServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "clam", + } + path = NotebookServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = NotebookServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "whelk" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = NotebookServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "octopus", + } + path = NotebookServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NotebookServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "oyster" + expected = "projects/{project}".format( + project=project, + ) + actual = NotebookServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = NotebookServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = NotebookServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = NotebookServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = NotebookServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NotebookServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.NotebookServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.NotebookServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = NotebookServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_get_location_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.GetLocationRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_location(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.GetLocationRequest, + dict, + ], +) +def test_get_location_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.Location() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_location(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_list_locations_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.ListLocationsRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({"name": "projects/sample1"}, request) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_locations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.ListLocationsRequest, + dict, + ], +) +def test_list_locations_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_locations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_get_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_set_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_test_iam_permissions_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + +def test_cancel_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.CancelOperationRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.DeleteOperationRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.DeleteOperationRequest, + dict, + ], +) +def test_delete_operation_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.ListOperationsRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_wait_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.WaitOperationRequest +): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.WaitOperationRequest, + dict, + ], +) +def test_wait_operation_rest(request_type): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.wait_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_delete_operation(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_delete_operation_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_wait_operation(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_wait_operation(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_wait_operation_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_wait_operation_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_list_locations_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_locations_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) + + +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_get_location_field_headers(): + client = NotebookServiceClient(credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations/abc",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = NotebookServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations/abc",
+    ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+    client = NotebookServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = NotebookServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = NotebookServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(
+            version=774,
+            etag=b"etag_blob",
+        )
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+    client = NotebookServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        # Wrap the policy in an awaitable fake gRPC call for the async surface.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_field_headers(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_set_iam_policy_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +def test_get_iam_policy(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            policy_pb2.Policy(
+                version=774,
+                etag=b"etag_blob",
+            )
+        )
+
+        response = await client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+
+
+def test_get_iam_policy_field_headers():
+    client = NotebookServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.GetIamPolicyRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        call.return_value = policy_pb2.Policy()
+
+        client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "resource=resource/value",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+    client = NotebookServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.GetIamPolicyRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+
+        await client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "resource=resource/value",
+    ) in kw["metadata"]
+
+
+def test_get_iam_policy_from_dict():
+    client = NotebookServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy()
+
+        response = client.get_iam_policy(
+            request={
+                "resource": "resource_value",
+                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_from_dict_async():
+    client = NotebookServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_test_iam_permissions(transport: str = "grpc"): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            iam_policy_pb2.TestIamPermissionsResponse(
+                permissions=["permissions_value"],
+            )
+        )
+
+        response = await client.test_iam_permissions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+
+    assert response.permissions == ["permissions_value"]
+
+
+def test_test_iam_permissions_field_headers():
+    client = NotebookServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.TestIamPermissionsRequest()
+    request.resource = "resource/value"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+        type(client.transport.test_iam_permissions), "__call__"
+    ) as call:
+        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+        client.test_iam_permissions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "resource=resource/value",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+    client = NotebookServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = NotebookServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = NotebookServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (NotebookServiceClient, transports.NotebookServiceGrpcTransport), + (NotebookServiceAsyncClient, transports.NotebookServiceGrpcAsyncIOTransport), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + 
scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py index 2bd0ade54a..51044be81c 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_persistent_resource_service.py @@ -2986,6 +2986,299 @@ async def test_update_persistent_resource_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.RebootPersistentResourceRequest, + dict, + ], +) +def test_reboot_persistent_resource(request_type, transport: str = "grpc"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.reboot_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.RebootPersistentResourceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_reboot_persistent_resource_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + client.reboot_persistent_resource() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.RebootPersistentResourceRequest() + + +def test_reboot_persistent_resource_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = persistent_resource_service.RebootPersistentResourceRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + client.reboot_persistent_resource(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.RebootPersistentResourceRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_reboot_persistent_resource_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.reboot_persistent_resource() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == persistent_resource_service.RebootPersistentResourceRequest() + + +@pytest.mark.asyncio +async def test_reboot_persistent_resource_async( + transport: str = "grpc_asyncio", + request_type=persistent_resource_service.RebootPersistentResourceRequest, +): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.reboot_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = persistent_resource_service.RebootPersistentResourceRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_reboot_persistent_resource_async_from_dict(): + await test_reboot_persistent_resource_async(request_type=dict) + + +def test_reboot_persistent_resource_field_headers(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = persistent_resource_service.RebootPersistentResourceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.reboot_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_reboot_persistent_resource_field_headers_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = persistent_resource_service.RebootPersistentResourceRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.reboot_persistent_resource(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_reboot_persistent_resource_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.reboot_persistent_resource( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_reboot_persistent_resource_flattened_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.reboot_persistent_resource( + persistent_resource_service.RebootPersistentResourceRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_reboot_persistent_resource_flattened_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.reboot_persistent_resource), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.reboot_persistent_resource( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_reboot_persistent_resource_flattened_error_async(): + client = PersistentResourceServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.reboot_persistent_resource( + persistent_resource_service.RebootPersistentResourceRequest(), + name="name_value", + ) + + @pytest.mark.parametrize( "request_type", [ @@ -3054,7 +3347,10 @@ def test_create_persistent_resource_rest(request_type): "ray_metric_spec": {"disabled": True}, }, }, - "resource_runtime": {"access_uris": {}}, + "resource_runtime": { + "access_uris": {}, + "notebook_runtime_template": "notebook_runtime_template_value", + }, "reserved_ip_ranges": [ "reserved_ip_ranges_value1", "reserved_ip_ranges_value2", @@ -4394,7 +4690,10 @@ def test_update_persistent_resource_rest(request_type): "ray_metric_spec": {"disabled": True}, }, }, - "resource_runtime": {"access_uris": {}}, + "resource_runtime": { + "access_uris": {}, + "notebook_runtime_template": "notebook_runtime_template_value", + }, "reserved_ip_ranges": [ "reserved_ip_ranges_value1", "reserved_ip_ranges_value2", @@ -4738,12 +5037,281 @@ def test_update_persistent_resource_rest_error(): ) -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.PersistentResourceServiceGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): +@pytest.mark.parametrize( + "request_type", + [ + persistent_resource_service.RebootPersistentResourceRequest, + dict, + ], +) +def test_reboot_persistent_resource_rest(request_type): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.reboot_persistent_resource(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_reboot_persistent_resource_rest_required_fields( + request_type=persistent_resource_service.RebootPersistentResourceRequest, +): + transport_class = transports.PersistentResourceServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).reboot_persistent_resource._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).reboot_persistent_resource._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.reboot_persistent_resource(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_reboot_persistent_resource_rest_unset_required_fields(): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.reboot_persistent_resource._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_reboot_persistent_resource_rest_interceptors(null_interceptor): + transport = transports.PersistentResourceServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PersistentResourceServiceRestInterceptor(), + ) + client = PersistentResourceServiceClient(transport=transport) + with mock.patch.object( + 
type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "post_reboot_persistent_resource", + ) as post, mock.patch.object( + transports.PersistentResourceServiceRestInterceptor, + "pre_reboot_persistent_resource", + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = persistent_resource_service.RebootPersistentResourceRequest.pb( + persistent_resource_service.RebootPersistentResourceRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = persistent_resource_service.RebootPersistentResourceRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.reboot_persistent_resource( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_reboot_persistent_resource_rest_bad_request( + transport: str = "rest", + request_type=persistent_resource_service.RebootPersistentResourceRequest, +): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.reboot_persistent_resource(request) + + +def test_reboot_persistent_resource_rest_flattened(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/persistentResources/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.reboot_persistent_resource(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/persistentResources/*}:reboot" + % client.transport._host, + args[1], + ) + + +def test_reboot_persistent_resource_rest_flattened_error(transport: str = "rest"): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.reboot_persistent_resource( + persistent_resource_service.RebootPersistentResourceRequest(), + name="name_value", + ) + + +def test_reboot_persistent_resource_rest_error(): + client = PersistentResourceServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PersistentResourceServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): client = PersistentResourceServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -4882,6 +5450,7 @@ def test_persistent_resource_service_base_transport(): "list_persistent_resources", "delete_persistent_resource", "update_persistent_resource", + "reboot_persistent_resource", "set_iam_policy", "get_iam_policy", "test_iam_permissions", @@ -5191,6 +5760,9 @@ def test_persistent_resource_service_client_transport_session_collision(transpor session1 = client1.transport.update_persistent_resource._session session2 = client2.transport.update_persistent_resource._session assert session1 != session2 + session1 = client1.transport.reboot_persistent_resource._session + session2 = client2.transport.reboot_persistent_resource._session + assert session1 != session2 def test_persistent_resource_service_grpc_transport_channel(): @@ 
-5376,10 +5948,38 @@ def test_parse_network_path(): assert expected == actual -def test_persistent_resource_path(): +def test_notebook_runtime_template_path(): project = "oyster" location = "nudibranch" - persistent_resource = "cuttlefish" + notebook_runtime_template = "cuttlefish" + expected = "projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}".format( + project=project, + location=location, + notebook_runtime_template=notebook_runtime_template, + ) + actual = PersistentResourceServiceClient.notebook_runtime_template_path( + project, location, notebook_runtime_template + ) + assert expected == actual + + +def test_parse_notebook_runtime_template_path(): + expected = { + "project": "mussel", + "location": "winkle", + "notebook_runtime_template": "nautilus", + } + path = PersistentResourceServiceClient.notebook_runtime_template_path(**expected) + + # Check that the path construction is reversible. + actual = PersistentResourceServiceClient.parse_notebook_runtime_template_path(path) + assert expected == actual + + +def test_persistent_resource_path(): + project = "scallop" + location = "abalone" + persistent_resource = "squid" expected = "projects/{project}/locations/{location}/persistentResources/{persistent_resource}".format( project=project, location=location, @@ -5393,9 +5993,9 @@ def test_persistent_resource_path(): def test_parse_persistent_resource_path(): expected = { - "project": "mussel", - "location": "winkle", - "persistent_resource": "nautilus", + "project": "clam", + "location": "whelk", + "persistent_resource": "octopus", } path = PersistentResourceServiceClient.persistent_resource_path(**expected) @@ -5405,7 +6005,7 @@ def test_parse_persistent_resource_path(): def test_common_billing_account_path(): - billing_account = "scallop" + billing_account = "oyster" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -5417,7 +6017,7 @@ def test_common_billing_account_path(): 
def test_parse_common_billing_account_path(): expected = { - "billing_account": "abalone", + "billing_account": "nudibranch", } path = PersistentResourceServiceClient.common_billing_account_path(**expected) @@ -5427,7 +6027,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "squid" + folder = "cuttlefish" expected = "folders/{folder}".format( folder=folder, ) @@ -5437,7 +6037,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "clam", + "folder": "mussel", } path = PersistentResourceServiceClient.common_folder_path(**expected) @@ -5447,7 +6047,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "whelk" + organization = "winkle" expected = "organizations/{organization}".format( organization=organization, ) @@ -5457,7 +6057,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "octopus", + "organization": "nautilus", } path = PersistentResourceServiceClient.common_organization_path(**expected) @@ -5467,7 +6067,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "oyster" + project = "scallop" expected = "projects/{project}".format( project=project, ) @@ -5477,7 +6077,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "nudibranch", + "project": "abalone", } path = PersistentResourceServiceClient.common_project_path(**expected) @@ -5487,8 +6087,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "cuttlefish" - location = "mussel" + project = "squid" + location = "clam" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -5499,8 +6099,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "winkle", - "location": "nautilus", + "project": "whelk", + "location": 
"octopus", } path = PersistentResourceServiceClient.common_location_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py index c450cdd88f..96cde1d559 100644 --- a/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py +++ b/tests/unit/gapic/aiplatform_v1beta1/test_prediction_service.py @@ -3777,6 +3777,288 @@ async def test_stream_generate_content_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + prediction_service.ChatCompletionsRequest, + dict, + ], +) +def test_chat_completions(request_type, transport: str = "grpc"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.chat_completions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([httpbody_pb2.HttpBody()]) + response = client.chat_completions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = prediction_service.ChatCompletionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, httpbody_pb2.HttpBody) + + +def test_chat_completions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call: + client.chat_completions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.ChatCompletionsRequest() + + +def test_chat_completions_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = prediction_service.ChatCompletionsRequest( + endpoint="endpoint_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.chat_completions), "__call__") as call: + client.chat_completions(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.ChatCompletionsRequest( + endpoint="endpoint_value", + ) + + +@pytest.mark.asyncio +async def test_chat_completions_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.chat_completions), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[httpbody_pb2.HttpBody()]) + response = await client.chat_completions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == prediction_service.ChatCompletionsRequest() + + +@pytest.mark.asyncio +async def test_chat_completions_async( + transport: str = "grpc_asyncio", + request_type=prediction_service.ChatCompletionsRequest, +): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.chat_completions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[httpbody_pb2.HttpBody()]) + response = await client.chat_completions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = prediction_service.ChatCompletionsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, httpbody_pb2.HttpBody) + + +@pytest.mark.asyncio +async def test_chat_completions_async_from_dict(): + await test_chat_completions_async(request_type=dict) + + +def test_chat_completions_field_headers(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = prediction_service.ChatCompletionsRequest() + + request.endpoint = "endpoint_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.chat_completions), "__call__") as call: + call.return_value = iter([httpbody_pb2.HttpBody()]) + client.chat_completions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "endpoint=endpoint_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_chat_completions_field_headers_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = prediction_service.ChatCompletionsRequest() + + request.endpoint = "endpoint_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.chat_completions), "__call__") as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[httpbody_pb2.HttpBody()]) + await client.chat_completions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "endpoint=endpoint_value", + ) in kw["metadata"] + + +def test_chat_completions_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.chat_completions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([httpbody_pb2.HttpBody()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.chat_completions( + endpoint="endpoint_value", + http_body=httpbody_pb2.HttpBody(content_type="content_type_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = "endpoint_value" + assert arg == mock_val + arg = args[0].http_body + mock_val = httpbody_pb2.HttpBody(content_type="content_type_value") + assert arg == mock_val + + +def test_chat_completions_flattened_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.chat_completions( + prediction_service.ChatCompletionsRequest(), + endpoint="endpoint_value", + http_body=httpbody_pb2.HttpBody(content_type="content_type_value"), + ) + + +@pytest.mark.asyncio +async def test_chat_completions_flattened_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.chat_completions), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([httpbody_pb2.HttpBody()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.chat_completions( + endpoint="endpoint_value", + http_body=httpbody_pb2.HttpBody(content_type="content_type_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].endpoint + mock_val = "endpoint_value" + assert arg == mock_val + arg = args[0].http_body + mock_val = httpbody_pb2.HttpBody(content_type="content_type_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_chat_completions_flattened_error_async(): + client = PredictionServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.chat_completions( + prediction_service.ChatCompletionsRequest(), + endpoint="endpoint_value", + http_body=httpbody_pb2.HttpBody(content_type="content_type_value"), + ) + + @pytest.mark.parametrize( "request_type", [ @@ -6149,6 +6431,363 @@ def test_stream_generate_content_rest_error(): ) +@pytest.mark.parametrize( + "request_type", + [ + prediction_service.ChatCompletionsRequest, + dict, + ], +) +def test_chat_completions_rest(request_type): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"} + request_init["http_body"] = { + "content_type": "content_type_value", + "data": b"data_blob", + "extensions": [ + { + "type_url": "type.googleapis.com/google.protobuf.Duration", + "value": b"\x08\x0c\x10\xdb\x07", + } + ], + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = prediction_service.ChatCompletionsRequest.meta.fields["http_body"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["http_body"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the 
dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["http_body"][field])): + del request_init["http_body"][field][i][subfield] + else: + del request_init["http_body"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = httpbody_pb2.HttpBody( + content_type="content_type_value", + data=b"data_blob", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.chat_completions(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, httpbody_pb2.HttpBody) + assert response.content_type == "content_type_value" + assert response.data == b"data_blob" + + +def test_chat_completions_rest_required_fields( + request_type=prediction_service.ChatCompletionsRequest, +): + transport_class = transports.PredictionServiceRestTransport + + request_init = {} + request_init["endpoint"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).chat_completions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["endpoint"] = "endpoint_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).chat_completions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "endpoint" in jsonified_request + assert jsonified_request["endpoint"] == "endpoint_value" + + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = httpbody_pb2.HttpBody() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.chat_completions(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_chat_completions_rest_unset_required_fields(): + transport = transports.PredictionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.chat_completions._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("endpoint",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_chat_completions_rest_interceptors(null_interceptor): + transport = transports.PredictionServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.PredictionServiceRestInterceptor(), + ) + client = PredictionServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.PredictionServiceRestInterceptor, "post_chat_completions" + ) as post, mock.patch.object( + 
transports.PredictionServiceRestInterceptor, "pre_chat_completions" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = prediction_service.ChatCompletionsRequest.pb( + prediction_service.ChatCompletionsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(httpbody_pb2.HttpBody()) + req.return_value._content = "[{}]".format(req.return_value._content) + + request = prediction_service.ChatCompletionsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = httpbody_pb2.HttpBody() + + client.chat_completions( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_chat_completions_rest_bad_request( + transport: str = "rest", request_type=prediction_service.ChatCompletionsRequest +): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"endpoint": "projects/sample1/locations/sample2/endpoints/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.chat_completions(request) + + +def test_chat_completions_rest_flattened(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = httpbody_pb2.HttpBody() + + # get arguments that satisfy an http rule for this method + sample_request = { + "endpoint": "projects/sample1/locations/sample2/endpoints/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + endpoint="endpoint_value", + http_body=httpbody_pb2.HttpBody(content_type="content_type_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.chat_completions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}/chat/completions" + % client.transport._host, + args[1], + ) + + +def test_chat_completions_rest_flattened_error(transport: str = "rest"): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.chat_completions( + prediction_service.ChatCompletionsRequest(), + endpoint="endpoint_value", + http_body=httpbody_pb2.HttpBody(content_type="content_type_value"), + ) + + +def test_chat_completions_rest_error(): + client = PredictionServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + def test_stream_direct_predict_rest_error(): client = PredictionServiceClient( credentials=ga_credentials.AnonymousCredentials(), transport="rest" @@ -6353,6 +6992,7 @@ def test_prediction_service_base_transport(): "count_tokens", "generate_content", "stream_generate_content", + "chat_completions", "set_iam_policy", "get_iam_policy", "test_iam_permissions", @@ -6660,6 +7300,9 @@ def test_prediction_service_client_transport_session_collision(transport_name): session1 = client1.transport.stream_generate_content._session session2 = client2.transport.stream_generate_content._session assert session1 != session2 + session1 = client1.transport.chat_completions._session + session2 = client2.transport.chat_completions._session + assert session1 != session2 def test_prediction_service_grpc_transport_channel(): @@ -6840,8 +7483,34 @@ def test_parse_model_path(): assert expected == actual +def test_rag_corpus_path(): + project = "squid" + location = "clam" + rag_corpus = "whelk" + expected = "projects/{project}/locations/{location}/ragCorpora/{rag_corpus}".format( + project=project, + location=location, + 
rag_corpus=rag_corpus, + ) + actual = PredictionServiceClient.rag_corpus_path(project, location, rag_corpus) + assert expected == actual + + +def test_parse_rag_corpus_path(): + expected = { + "project": "octopus", + "location": "oyster", + "rag_corpus": "nudibranch", + } + path = PredictionServiceClient.rag_corpus_path(**expected) + + # Check that the path construction is reversible. + actual = PredictionServiceClient.parse_rag_corpus_path(path) + assert expected == actual + + def test_common_billing_account_path(): - billing_account = "squid" + billing_account = "cuttlefish" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -6851,7 +7520,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "clam", + "billing_account": "mussel", } path = PredictionServiceClient.common_billing_account_path(**expected) @@ -6861,7 +7530,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "whelk" + folder = "winkle" expected = "folders/{folder}".format( folder=folder, ) @@ -6871,7 +7540,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "octopus", + "folder": "nautilus", } path = PredictionServiceClient.common_folder_path(**expected) @@ -6881,7 +7550,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "oyster" + organization = "scallop" expected = "organizations/{organization}".format( organization=organization, ) @@ -6891,7 +7560,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "nudibranch", + "organization": "abalone", } path = PredictionServiceClient.common_organization_path(**expected) @@ -6901,7 +7570,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "cuttlefish" + project = "squid" expected = "projects/{project}".format( 
project=project, ) @@ -6911,7 +7580,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "mussel", + "project": "clam", } path = PredictionServiceClient.common_project_path(**expected) @@ -6921,8 +7590,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "winkle" - location = "nautilus" + project = "whelk" + location = "octopus" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -6933,8 +7602,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "scallop", - "location": "abalone", + "project": "oyster", + "location": "nudibranch", } path = PredictionServiceClient.common_location_path(**expected) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py new file mode 100644 index 0000000000..4e4bb0feae --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_data_service.py @@ -0,0 +1,9938 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from google.api_core import api_core_version +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import ( + VertexRagDataServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import ( + VertexRagDataServiceClient, +) +from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import pagers +from google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service import transports +from google.cloud.aiplatform_v1beta1.types import io +from google.cloud.aiplatform_v1beta1.types import operation as gca_operation +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data +from google.cloud.aiplatform_v1beta1.types import vertex_rag_data_service +from google.cloud.location import locations_pb2 
+from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +from google.protobuf import empty_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert VertexRagDataServiceClient._get_default_mtls_endpoint(None) is None + assert ( + VertexRagDataServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + VertexRagDataServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + VertexRagDataServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + VertexRagDataServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + VertexRagDataServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +def test__read_environment_variables(): + assert VertexRagDataServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert VertexRagDataServiceClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert VertexRagDataServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + VertexRagDataServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with 
mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert VertexRagDataServiceClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert VertexRagDataServiceClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert VertexRagDataServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + VertexRagDataServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert VertexRagDataServiceClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert VertexRagDataServiceClient._get_client_cert_source(None, False) is None + assert ( + VertexRagDataServiceClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + VertexRagDataServiceClient._get_client_cert_source( + mock_provided_cert_source, True + ) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + VertexRagDataServiceClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + VertexRagDataServiceClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is 
mock_provided_cert_source + ) + + +@mock.patch.object( + VertexRagDataServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagDataServiceClient), +) +@mock.patch.object( + VertexRagDataServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagDataServiceAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = VertexRagDataServiceClient._DEFAULT_UNIVERSE + default_endpoint = VertexRagDataServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = VertexRagDataServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + VertexRagDataServiceClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + VertexRagDataServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == VertexRagDataServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + VertexRagDataServiceClient._get_api_endpoint( + None, None, default_universe, "auto" + ) + == default_endpoint + ) + assert ( + VertexRagDataServiceClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == VertexRagDataServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + VertexRagDataServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == VertexRagDataServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + VertexRagDataServiceClient._get_api_endpoint(None, None, mock_universe, "never") + == mock_endpoint + ) + assert ( + VertexRagDataServiceClient._get_api_endpoint( + None, None, default_universe, "never" + ) + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + VertexRagDataServiceClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert 
( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + VertexRagDataServiceClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + VertexRagDataServiceClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + VertexRagDataServiceClient._get_universe_domain(None, None) + == VertexRagDataServiceClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + VertexRagDataServiceClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceGrpcTransport, + "grpc", + ), + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceRestTransport, + "rest", + ), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. 
+ channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. + google_auth_major, google_auth_minor = [ + int(part) for part in google.auth.__version__.split(".")[0:2] + ] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. + client = client_class(transport=transport_class(credentials=credentials)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. 
+ api_core_major, api_core_minor = [ + int(part) for part in api_core_version.__version__.split(".")[0:2] + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (VertexRagDataServiceClient, "grpc"), + (VertexRagDataServiceAsyncClient, "grpc_asyncio"), + (VertexRagDataServiceClient, "rest"), + ], +) +def test_vertex_rag_data_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.VertexRagDataServiceGrpcTransport, "grpc"), + (transports.VertexRagDataServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.VertexRagDataServiceRestTransport, "rest"), + ], +) 
+def test_vertex_rag_data_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (VertexRagDataServiceClient, "grpc"), + (VertexRagDataServiceAsyncClient, "grpc_asyncio"), + (VertexRagDataServiceClient, "rest"), + ], +) +def test_vertex_rag_data_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +def test_vertex_rag_data_service_client_get_transport_class(): + transport = VertexRagDataServiceClient.get_transport_class() + available_transports = [ + transports.VertexRagDataServiceGrpcTransport, + transports.VertexRagDataServiceRestTransport, + ] + 
assert transport in available_transports + + transport = VertexRagDataServiceClient.get_transport_class("grpc") + assert transport == transports.VertexRagDataServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceGrpcTransport, + "grpc", + ), + ( + VertexRagDataServiceAsyncClient, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceRestTransport, + "rest", + ), + ], +) +@mock.patch.object( + VertexRagDataServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagDataServiceClient), +) +@mock.patch.object( + VertexRagDataServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagDataServiceAsyncClient), +) +def test_vertex_rag_data_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(VertexRagDataServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(VertexRagDataServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + 
always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceGrpcTransport, + "grpc", + "true", + ), + ( + VertexRagDataServiceAsyncClient, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceGrpcTransport, + "grpc", + "false", + ), + ( + VertexRagDataServiceAsyncClient, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceRestTransport, + "rest", + "true", + ), + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + VertexRagDataServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagDataServiceClient), +) +@mock.patch.object( + VertexRagDataServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagDataServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_vertex_rag_data_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [VertexRagDataServiceClient, VertexRagDataServiceAsyncClient] +) +@mock.patch.object( + VertexRagDataServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(VertexRagDataServiceClient), +) +@mock.patch.object( + VertexRagDataServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(VertexRagDataServiceAsyncClient), +) +def test_vertex_rag_data_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", [VertexRagDataServiceClient, VertexRagDataServiceAsyncClient] +) +@mock.patch.object( + VertexRagDataServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagDataServiceClient), +) +@mock.patch.object( + VertexRagDataServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagDataServiceAsyncClient), +) +def test_vertex_rag_data_service_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = VertexRagDataServiceClient._DEFAULT_UNIVERSE + default_endpoint = VertexRagDataServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = VertexRagDataServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceGrpcTransport, + "grpc", + ), + ( + VertexRagDataServiceAsyncClient, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceRestTransport, + "rest", + ), + ], +) +def test_vertex_rag_data_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + VertexRagDataServiceAsyncClient, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceRestTransport, + "rest", + None, + ), + ], +) +def test_vertex_rag_data_service_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_vertex_rag_data_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.transports.VertexRagDataServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = VertexRagDataServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + VertexRagDataServiceClient, + transports.VertexRagDataServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + VertexRagDataServiceAsyncClient, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_vertex_rag_data_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.CreateRagCorpusRequest, + dict, + ], +) +def test_create_rag_corpus(request_type, transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + 
# and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_rag_corpus), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_rag_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.CreateRagCorpusRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_rag_corpus_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_rag_corpus), "__call__" + ) as call: + client.create_rag_corpus() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.CreateRagCorpusRequest() + + +def test_create_rag_corpus_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = vertex_rag_data_service.CreateRagCorpusRequest( + parent="parent_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_rag_corpus), "__call__" + ) as call: + client.create_rag_corpus(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.CreateRagCorpusRequest( + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_create_rag_corpus_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_rag_corpus), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_rag_corpus() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.CreateRagCorpusRequest() + + +@pytest.mark.asyncio +async def test_create_rag_corpus_async( + transport: str = "grpc_asyncio", + request_type=vertex_rag_data_service.CreateRagCorpusRequest, +): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_rag_corpus), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_rag_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.CreateRagCorpusRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_rag_corpus_async_from_dict(): + await test_create_rag_corpus_async(request_type=dict) + + +def test_create_rag_corpus_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_data_service.CreateRagCorpusRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_rag_corpus), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_rag_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_rag_corpus_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = vertex_rag_data_service.CreateRagCorpusRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_rag_corpus), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_rag_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_rag_corpus_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_rag_corpus), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_rag_corpus( + parent="parent_value", + rag_corpus=vertex_rag_data.RagCorpus(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].rag_corpus + mock_val = vertex_rag_data.RagCorpus(name="name_value") + assert arg == mock_val + + +def test_create_rag_corpus_flattened_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_rag_corpus( + vertex_rag_data_service.CreateRagCorpusRequest(), + parent="parent_value", + rag_corpus=vertex_rag_data.RagCorpus(name="name_value"), + ) + + +@pytest.mark.asyncio +async def test_create_rag_corpus_flattened_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_rag_corpus), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_rag_corpus( + parent="parent_value", + rag_corpus=vertex_rag_data.RagCorpus(name="name_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].rag_corpus + mock_val = vertex_rag_data.RagCorpus(name="name_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_rag_corpus_flattened_error_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.create_rag_corpus( + vertex_rag_data_service.CreateRagCorpusRequest(), + parent="parent_value", + rag_corpus=vertex_rag_data.RagCorpus(name="name_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.GetRagCorpusRequest, + dict, + ], +) +def test_get_rag_corpus(request_type, transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data.RagCorpus( + name="name_value", + display_name="display_name_value", + description="description_value", + ) + response = client.get_rag_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.GetRagCorpusRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, vertex_rag_data.RagCorpus) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + + +def test_get_rag_corpus_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call: + client.get_rag_corpus() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.GetRagCorpusRequest() + + +def test_get_rag_corpus_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = vertex_rag_data_service.GetRagCorpusRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call: + client.get_rag_corpus(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.GetRagCorpusRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_rag_corpus_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data.RagCorpus( + name="name_value", + display_name="display_name_value", + description="description_value", + ) + ) + response = await client.get_rag_corpus() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.GetRagCorpusRequest() + + +@pytest.mark.asyncio +async def test_get_rag_corpus_async( + transport: str = "grpc_asyncio", + request_type=vertex_rag_data_service.GetRagCorpusRequest, +): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data.RagCorpus( + name="name_value", + display_name="display_name_value", + description="description_value", + ) + ) + response = await client.get_rag_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.GetRagCorpusRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, vertex_rag_data.RagCorpus) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + + +@pytest.mark.asyncio +async def test_get_rag_corpus_async_from_dict(): + await test_get_rag_corpus_async(request_type=dict) + + +def test_get_rag_corpus_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_data_service.GetRagCorpusRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call: + call.return_value = vertex_rag_data.RagCorpus() + client.get_rag_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_rag_corpus_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_data_service.GetRagCorpusRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data.RagCorpus() + ) + await client.get_rag_corpus(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_rag_corpus_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data.RagCorpus() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_rag_corpus( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_rag_corpus_flattened_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_rag_corpus( + vertex_rag_data_service.GetRagCorpusRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_rag_corpus_flattened_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_corpus), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = vertex_rag_data.RagCorpus() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data.RagCorpus() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_rag_corpus( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_rag_corpus_flattened_error_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_rag_corpus( + vertex_rag_data_service.GetRagCorpusRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.ListRagCorporaRequest, + dict, + ], +) +def test_list_rag_corpora(request_type, transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data_service.ListRagCorporaResponse( + next_page_token="next_page_token_value", + ) + response = client.list_rag_corpora(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.ListRagCorporaRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListRagCorporaPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_rag_corpora_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call: + client.list_rag_corpora() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.ListRagCorporaRequest() + + +def test_list_rag_corpora_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = vertex_rag_data_service.ListRagCorporaRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call: + client.list_rag_corpora(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.ListRagCorporaRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +@pytest.mark.asyncio +async def test_list_rag_corpora_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data_service.ListRagCorporaResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_rag_corpora() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.ListRagCorporaRequest() + + +@pytest.mark.asyncio +async def test_list_rag_corpora_async( + transport: str = "grpc_asyncio", + request_type=vertex_rag_data_service.ListRagCorporaRequest, +): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data_service.ListRagCorporaResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_rag_corpora(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.ListRagCorporaRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListRagCorporaAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_rag_corpora_async_from_dict(): + await test_list_rag_corpora_async(request_type=dict) + + +def test_list_rag_corpora_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_data_service.ListRagCorporaRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call: + call.return_value = vertex_rag_data_service.ListRagCorporaResponse() + client.list_rag_corpora(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_rag_corpora_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. 
Set these to a non-empty value. + request = vertex_rag_data_service.ListRagCorporaRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data_service.ListRagCorporaResponse() + ) + await client.list_rag_corpora(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_rag_corpora_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data_service.ListRagCorporaResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_rag_corpora( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_rag_corpora_flattened_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_rag_corpora( + vertex_rag_data_service.ListRagCorporaRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_rag_corpora_flattened_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data_service.ListRagCorporaResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data_service.ListRagCorporaResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_rag_corpora( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_rag_corpora_flattened_error_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_rag_corpora( + vertex_rag_data_service.ListRagCorporaRequest(), + parent="parent_value", + ) + + +def test_list_rag_corpora_pager(transport_name: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + ], + next_page_token="abc", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[], + next_page_token="def", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + ], + next_page_token="ghi", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_rag_corpora(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, vertex_rag_data.RagCorpus) for i in results) + + +def test_list_rag_corpora_pages(transport_name: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_corpora), "__call__") as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + ], + next_page_token="abc", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[], + next_page_token="def", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + ], + next_page_token="ghi", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + ], + ), + RuntimeError, + ) + pages = list(client.list_rag_corpora(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_rag_corpora_async_pager(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_rag_corpora), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + ], + next_page_token="abc", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[], + next_page_token="def", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + ], + next_page_token="ghi", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_rag_corpora( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, vertex_rag_data.RagCorpus) for i in responses) + + +@pytest.mark.asyncio +async def test_list_rag_corpora_async_pages(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_rag_corpora), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + ], + next_page_token="abc", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[], + next_page_token="def", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + ], + next_page_token="ghi", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_rag_corpora(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.DeleteRagCorpusRequest, + dict, + ], +) +def test_delete_rag_corpus(request_type, transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_rag_corpus), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_rag_corpus(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.DeleteRagCorpusRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_rag_corpus_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_rag_corpus), "__call__" + ) as call: + client.delete_rag_corpus() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.DeleteRagCorpusRequest() + + +def test_delete_rag_corpus_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = vertex_rag_data_service.DeleteRagCorpusRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.delete_rag_corpus), "__call__" + ) as call: + client.delete_rag_corpus(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.DeleteRagCorpusRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_rag_corpus_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_rag_corpus), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_rag_corpus() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.DeleteRagCorpusRequest() + + +@pytest.mark.asyncio +async def test_delete_rag_corpus_async( + transport: str = "grpc_asyncio", + request_type=vertex_rag_data_service.DeleteRagCorpusRequest, +): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_rag_corpus), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_rag_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.DeleteRagCorpusRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_rag_corpus_async_from_dict(): + await test_delete_rag_corpus_async(request_type=dict) + + +def test_delete_rag_corpus_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_data_service.DeleteRagCorpusRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_rag_corpus), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_rag_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_rag_corpus_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = vertex_rag_data_service.DeleteRagCorpusRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_rag_corpus), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_rag_corpus(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_rag_corpus_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_rag_corpus), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_rag_corpus( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_rag_corpus_flattened_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_rag_corpus( + vertex_rag_data_service.DeleteRagCorpusRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_rag_corpus_flattened_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_rag_corpus), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_rag_corpus( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_rag_corpus_flattened_error_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.delete_rag_corpus( + vertex_rag_data_service.DeleteRagCorpusRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.UploadRagFileRequest, + dict, + ], +) +def test_upload_rag_file(request_type, transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data_service.UploadRagFileResponse() + response = client.upload_rag_file(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.UploadRagFileRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, vertex_rag_data_service.UploadRagFileResponse) + + +def test_upload_rag_file_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call: + client.upload_rag_file() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.UploadRagFileRequest() + + +def test_upload_rag_file_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = vertex_rag_data_service.UploadRagFileRequest( + parent="parent_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call: + client.upload_rag_file(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.UploadRagFileRequest( + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_upload_rag_file_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data_service.UploadRagFileResponse() + ) + response = await client.upload_rag_file() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.UploadRagFileRequest() + + +@pytest.mark.asyncio +async def test_upload_rag_file_async( + transport: str = "grpc_asyncio", + request_type=vertex_rag_data_service.UploadRagFileRequest, +): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data_service.UploadRagFileResponse() + ) + response = await client.upload_rag_file(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.UploadRagFileRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, vertex_rag_data_service.UploadRagFileResponse) + + +@pytest.mark.asyncio +async def test_upload_rag_file_async_from_dict(): + await test_upload_rag_file_async(request_type=dict) + + +def test_upload_rag_file_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = vertex_rag_data_service.UploadRagFileRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call: + call.return_value = vertex_rag_data_service.UploadRagFileResponse() + client.upload_rag_file(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_upload_rag_file_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_data_service.UploadRagFileRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data_service.UploadRagFileResponse() + ) + await client.upload_rag_file(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_upload_rag_file_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data_service.UploadRagFileResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.upload_rag_file( + parent="parent_value", + rag_file=vertex_rag_data.RagFile( + gcs_source=io.GcsSource(uris=["uris_value"]) + ), + upload_rag_file_config=vertex_rag_data.UploadRagFileConfig( + rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig( + chunk_size=1075 + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].rag_file + mock_val = vertex_rag_data.RagFile(gcs_source=io.GcsSource(uris=["uris_value"])) + assert arg == mock_val + arg = args[0].upload_rag_file_config + mock_val = vertex_rag_data.UploadRagFileConfig( + rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig( + chunk_size=1075 + ) + ) + assert arg == mock_val + + +def test_upload_rag_file_flattened_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.upload_rag_file( + vertex_rag_data_service.UploadRagFileRequest(), + parent="parent_value", + rag_file=vertex_rag_data.RagFile( + gcs_source=io.GcsSource(uris=["uris_value"]) + ), + upload_rag_file_config=vertex_rag_data.UploadRagFileConfig( + rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig( + chunk_size=1075 + ) + ), + ) + + +@pytest.mark.asyncio +async def test_upload_rag_file_flattened_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.upload_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data_service.UploadRagFileResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data_service.UploadRagFileResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.upload_rag_file( + parent="parent_value", + rag_file=vertex_rag_data.RagFile( + gcs_source=io.GcsSource(uris=["uris_value"]) + ), + upload_rag_file_config=vertex_rag_data.UploadRagFileConfig( + rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig( + chunk_size=1075 + ) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].rag_file + mock_val = vertex_rag_data.RagFile(gcs_source=io.GcsSource(uris=["uris_value"])) + assert arg == mock_val + arg = args[0].upload_rag_file_config + mock_val = vertex_rag_data.UploadRagFileConfig( + rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig( + chunk_size=1075 + ) + ) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_upload_rag_file_flattened_error_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.upload_rag_file( + vertex_rag_data_service.UploadRagFileRequest(), + parent="parent_value", + rag_file=vertex_rag_data.RagFile( + gcs_source=io.GcsSource(uris=["uris_value"]) + ), + upload_rag_file_config=vertex_rag_data.UploadRagFileConfig( + rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig( + chunk_size=1075 + ) + ), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.ImportRagFilesRequest, + dict, + ], +) +def test_import_rag_files(request_type, transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.import_rag_files(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.ImportRagFilesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_import_rag_files_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call: + client.import_rag_files() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.ImportRagFilesRequest() + + +def test_import_rag_files_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = vertex_rag_data_service.ImportRagFilesRequest( + parent="parent_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call: + client.import_rag_files(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.ImportRagFilesRequest( + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_import_rag_files_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.import_rag_files() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.ImportRagFilesRequest() + + +@pytest.mark.asyncio +async def test_import_rag_files_async( + transport: str = "grpc_asyncio", + request_type=vertex_rag_data_service.ImportRagFilesRequest, +): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.import_rag_files(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.ImportRagFilesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_import_rag_files_async_from_dict(): + await test_import_rag_files_async(request_type=dict) + + +def test_import_rag_files_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_data_service.ImportRagFilesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.import_rag_files(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_import_rag_files_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = vertex_rag_data_service.ImportRagFilesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.import_rag_files(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_import_rag_files_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.import_rag_files( + parent="parent_value", + import_rag_files_config=vertex_rag_data.ImportRagFilesConfig( + gcs_source=io.GcsSource(uris=["uris_value"]) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].import_rag_files_config + mock_val = vertex_rag_data.ImportRagFilesConfig( + gcs_source=io.GcsSource(uris=["uris_value"]) + ) + assert arg == mock_val + + +def test_import_rag_files_flattened_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.import_rag_files( + vertex_rag_data_service.ImportRagFilesRequest(), + parent="parent_value", + import_rag_files_config=vertex_rag_data.ImportRagFilesConfig( + gcs_source=io.GcsSource(uris=["uris_value"]) + ), + ) + + +@pytest.mark.asyncio +async def test_import_rag_files_flattened_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.import_rag_files), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.import_rag_files( + parent="parent_value", + import_rag_files_config=vertex_rag_data.ImportRagFilesConfig( + gcs_source=io.GcsSource(uris=["uris_value"]) + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].import_rag_files_config + mock_val = vertex_rag_data.ImportRagFilesConfig( + gcs_source=io.GcsSource(uris=["uris_value"]) + ) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_import_rag_files_flattened_error_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.import_rag_files( + vertex_rag_data_service.ImportRagFilesRequest(), + parent="parent_value", + import_rag_files_config=vertex_rag_data.ImportRagFilesConfig( + gcs_source=io.GcsSource(uris=["uris_value"]) + ), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.GetRagFileRequest, + dict, + ], +) +def test_get_rag_file(request_type, transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data.RagFile( + name="name_value", + display_name="display_name_value", + description="description_value", + size_bytes=1089, + rag_file_type=vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT, + ) + response = client.get_rag_file(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.GetRagFileRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, vertex_rag_data.RagFile) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.size_bytes == 1089 + assert ( + response.rag_file_type == vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT + ) + + +def test_get_rag_file_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call: + client.get_rag_file() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.GetRagFileRequest() + + +def test_get_rag_file_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = vertex_rag_data_service.GetRagFileRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call: + client.get_rag_file(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.GetRagFileRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_rag_file_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data.RagFile( + name="name_value", + display_name="display_name_value", + description="description_value", + size_bytes=1089, + rag_file_type=vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT, + ) + ) + response = await client.get_rag_file() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.GetRagFileRequest() + + +@pytest.mark.asyncio +async def test_get_rag_file_async( + transport: str = "grpc_asyncio", + request_type=vertex_rag_data_service.GetRagFileRequest, +): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data.RagFile( + name="name_value", + display_name="display_name_value", + description="description_value", + size_bytes=1089, + rag_file_type=vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT, + ) + ) + response = await client.get_rag_file(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.GetRagFileRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, vertex_rag_data.RagFile) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.size_bytes == 1089 + assert ( + response.rag_file_type == vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT + ) + + +@pytest.mark.asyncio +async def test_get_rag_file_async_from_dict(): + await test_get_rag_file_async(request_type=dict) + + +def test_get_rag_file_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_data_service.GetRagFileRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call: + call.return_value = vertex_rag_data.RagFile() + client.get_rag_file(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_rag_file_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_data_service.GetRagFileRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data.RagFile() + ) + await client.get_rag_file(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_rag_file_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data.RagFile() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_rag_file( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_rag_file_flattened_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_rag_file( + vertex_rag_data_service.GetRagFileRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_rag_file_flattened_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data.RagFile() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data.RagFile() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_rag_file( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_rag_file_flattened_error_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.get_rag_file( + vertex_rag_data_service.GetRagFileRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.ListRagFilesRequest, + dict, + ], +) +def test_list_rag_files(request_type, transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data_service.ListRagFilesResponse( + next_page_token="next_page_token_value", + ) + response = client.list_rag_files(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.ListRagFilesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListRagFilesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_rag_files_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call: + client.list_rag_files() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.ListRagFilesRequest() + + +def test_list_rag_files_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = vertex_rag_data_service.ListRagFilesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call: + client.list_rag_files(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.ListRagFilesRequest( + parent="parent_value", + page_token="page_token_value", + ) + + +@pytest.mark.asyncio +async def test_list_rag_files_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data_service.ListRagFilesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_rag_files() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.ListRagFilesRequest() + + +@pytest.mark.asyncio +async def test_list_rag_files_async( + transport: str = "grpc_asyncio", + request_type=vertex_rag_data_service.ListRagFilesRequest, +): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data_service.ListRagFilesResponse( + next_page_token="next_page_token_value", + ) + ) + response = await client.list_rag_files(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.ListRagFilesRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListRagFilesAsyncPager) + assert response.next_page_token == "next_page_token_value" + + +@pytest.mark.asyncio +async def test_list_rag_files_async_from_dict(): + await test_list_rag_files_async(request_type=dict) + + +def test_list_rag_files_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = vertex_rag_data_service.ListRagFilesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call: + call.return_value = vertex_rag_data_service.ListRagFilesResponse() + client.list_rag_files(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_rag_files_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_data_service.ListRagFilesRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data_service.ListRagFilesResponse() + ) + await client.list_rag_files(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_list_rag_files_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data_service.ListRagFilesResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_rag_files( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +def test_list_rag_files_flattened_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_rag_files( + vertex_rag_data_service.ListRagFilesRequest(), + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_list_rag_files_flattened_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_data_service.ListRagFilesResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_data_service.ListRagFilesResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_rag_files( + parent="parent_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_list_rag_files_flattened_error_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_rag_files( + vertex_rag_data_service.ListRagFilesRequest(), + parent="parent_value", + ) + + +def test_list_rag_files_pager(transport_name: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + ], + next_page_token="abc", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[], + next_page_token="def", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + ], + next_page_token="ghi", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + ], + ), + RuntimeError, + ) + + metadata = () + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)), + ) + pager = client.list_rag_files(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, vertex_rag_data.RagFile) for i in results) + + +def test_list_rag_files_pages(transport_name: str = "grpc"): + client = VertexRagDataServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport_name, + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_rag_files), "__call__") as call: + # Set the response to a series of pages. + call.side_effect = ( + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + ], + next_page_token="abc", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[], + next_page_token="def", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + ], + next_page_token="ghi", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + ], + ), + RuntimeError, + ) + pages = list(client.list_rag_files(request={}).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.asyncio +async def test_list_rag_files_async_pager(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_rag_files), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + ], + next_page_token="abc", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[], + next_page_token="def", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + ], + next_page_token="ghi", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_rag_files( + request={}, + ) + assert async_pager.next_page_token == "abc" + responses = [] + async for response in async_pager: # pragma: no branch + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, vertex_rag_data.RagFile) for i in responses) + + +@pytest.mark.asyncio +async def test_list_rag_files_async_pages(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_rag_files), "__call__", new_callable=mock.AsyncMock + ) as call: + # Set the response to a series of pages. 
+ call.side_effect = ( + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + ], + next_page_token="abc", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[], + next_page_token="def", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + ], + next_page_token="ghi", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_rag_files(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.DeleteRagFileRequest, + dict, + ], +) +def test_delete_rag_file(request_type, transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.delete_rag_file(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.DeleteRagFileRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_delete_rag_file_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call: + client.delete_rag_file() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.DeleteRagFileRequest() + + +def test_delete_rag_file_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = vertex_rag_data_service.DeleteRagFileRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call: + client.delete_rag_file(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.DeleteRagFileRequest( + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_rag_file_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. 
request == None and no flattened fields passed, work. + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_rag_file() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_data_service.DeleteRagFileRequest() + + +@pytest.mark.asyncio +async def test_delete_rag_file_async( + transport: str = "grpc_asyncio", + request_type=vertex_rag_data_service.DeleteRagFileRequest, +): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.delete_rag_file(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = vertex_rag_data_service.DeleteRagFileRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_delete_rag_file_async_from_dict(): + await test_delete_rag_file_async(request_type=dict) + + +def test_delete_rag_file_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_data_service.DeleteRagFileRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.delete_rag_file(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_rag_file_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_data_service.DeleteRagFileRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.delete_rag_file(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_delete_rag_file_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_rag_file( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_delete_rag_file_flattened_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_rag_file( + vertex_rag_data_service.DeleteRagFileRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_delete_rag_file_flattened_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_rag_file), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_rag_file( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_delete_rag_file_flattened_error_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_rag_file( + vertex_rag_data_service.DeleteRagFileRequest(), + name="name_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.CreateRagCorpusRequest, + dict, + ], +) +def test_create_rag_corpus_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request_init["rag_corpus"] = { + "name": "name_value", + "display_name": "display_name_value", + "description": "description_value", + "create_time": {"seconds": 751, "nanos": 543}, + "update_time": {}, + } + # The version of a generated dependency at test runtime may differ from the version used during generation. 
+ # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = vertex_rag_data_service.CreateRagCorpusRequest.meta.fields[ + "rag_corpus" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["rag_corpus"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version 
of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["rag_corpus"][field])): + del request_init["rag_corpus"][field][i][subfield] + else: + del request_init["rag_corpus"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.create_rag_corpus(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_create_rag_corpus_rest_required_fields( + request_type=vertex_rag_data_service.CreateRagCorpusRequest, +): + transport_class = transports.VertexRagDataServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_rag_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).create_rag_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.create_rag_corpus(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_create_rag_corpus_rest_unset_required_fields(): + transport = transports.VertexRagDataServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.create_rag_corpus._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "ragCorpus", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_rag_corpus_rest_interceptors(null_interceptor): + transport = transports.VertexRagDataServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.VertexRagDataServiceRestInterceptor(), + ) + client = VertexRagDataServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.VertexRagDataServiceRestInterceptor, "post_create_rag_corpus" + ) as post, mock.patch.object( + transports.VertexRagDataServiceRestInterceptor, "pre_create_rag_corpus" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = vertex_rag_data_service.CreateRagCorpusRequest.pb( + vertex_rag_data_service.CreateRagCorpusRequest() + ) + 
transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = vertex_rag_data_service.CreateRagCorpusRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_rag_corpus( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_rag_corpus_rest_bad_request( + transport: str = "rest", request_type=vertex_rag_data_service.CreateRagCorpusRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_rag_corpus(request) + + +def test_create_rag_corpus_rest_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + rag_corpus=vertex_rag_data.RagCorpus(name="name_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.create_rag_corpus(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*}/ragCorpora" + % client.transport._host, + args[1], + ) + + +def test_create_rag_corpus_rest_flattened_error(transport: str = "rest"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.create_rag_corpus( + vertex_rag_data_service.CreateRagCorpusRequest(), + parent="parent_value", + rag_corpus=vertex_rag_data.RagCorpus(name="name_value"), + ) + + +def test_create_rag_corpus_rest_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.GetRagCorpusRequest, + dict, + ], +) +def test_get_rag_corpus_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data.RagCorpus( + name="name_value", + display_name="display_name_value", + description="description_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vertex_rag_data.RagCorpus.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_rag_corpus(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, vertex_rag_data.RagCorpus) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + + +def test_get_rag_corpus_rest_required_fields( + request_type=vertex_rag_data_service.GetRagCorpusRequest, +): + transport_class = transports.VertexRagDataServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_rag_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_rag_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data.RagCorpus() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
+ with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = vertex_rag_data.RagCorpus.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_rag_corpus(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_rag_corpus_rest_unset_required_fields(): + transport = transports.VertexRagDataServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_rag_corpus._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_rag_corpus_rest_interceptors(null_interceptor): + transport = transports.VertexRagDataServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.VertexRagDataServiceRestInterceptor(), + ) + client = VertexRagDataServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.VertexRagDataServiceRestInterceptor, "post_get_rag_corpus" + ) as post, mock.patch.object( + transports.VertexRagDataServiceRestInterceptor, "pre_get_rag_corpus" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = 
vertex_rag_data_service.GetRagCorpusRequest.pb( + vertex_rag_data_service.GetRagCorpusRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = vertex_rag_data.RagCorpus.to_json( + vertex_rag_data.RagCorpus() + ) + + request = vertex_rag_data_service.GetRagCorpusRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = vertex_rag_data.RagCorpus() + + client.get_rag_corpus( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_rag_corpus_rest_bad_request( + transport: str = "rest", request_type=vertex_rag_data_service.GetRagCorpusRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_rag_corpus(request) + + +def test_get_rag_corpus_rest_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data.RagCorpus() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/ragCorpora/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vertex_rag_data.RagCorpus.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_rag_corpus(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/ragCorpora/*}" + % client.transport._host, + args[1], + ) + + +def test_get_rag_corpus_rest_flattened_error(transport: str = "rest"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_rag_corpus( + vertex_rag_data_service.GetRagCorpusRequest(), + name="name_value", + ) + + +def test_get_rag_corpus_rest_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.ListRagCorporaRequest, + dict, + ], +) +def test_list_rag_corpora_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data_service.ListRagCorporaResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vertex_rag_data_service.ListRagCorporaResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_rag_corpora(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListRagCorporaPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_rag_corpora_rest_required_fields( + request_type=vertex_rag_data_service.ListRagCorporaRequest, +): + transport_class = transports.VertexRagDataServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_rag_corpora._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_rag_corpora._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data_service.ListRagCorporaResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = vertex_rag_data_service.ListRagCorporaResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_rag_corpora(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_rag_corpora_rest_unset_required_fields(): + transport = transports.VertexRagDataServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_rag_corpora._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_rag_corpora_rest_interceptors(null_interceptor): + transport = transports.VertexRagDataServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.VertexRagDataServiceRestInterceptor(), + ) + client = VertexRagDataServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + 
path_template, "transcode" + ) as transcode, mock.patch.object( + transports.VertexRagDataServiceRestInterceptor, "post_list_rag_corpora" + ) as post, mock.patch.object( + transports.VertexRagDataServiceRestInterceptor, "pre_list_rag_corpora" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = vertex_rag_data_service.ListRagCorporaRequest.pb( + vertex_rag_data_service.ListRagCorporaRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + vertex_rag_data_service.ListRagCorporaResponse.to_json( + vertex_rag_data_service.ListRagCorporaResponse() + ) + ) + + request = vertex_rag_data_service.ListRagCorporaRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = vertex_rag_data_service.ListRagCorporaResponse() + + client.list_rag_corpora( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_rag_corpora_rest_bad_request( + transport: str = "rest", request_type=vertex_rag_data_service.ListRagCorporaRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_rag_corpora(request) + + +def test_list_rag_corpora_rest_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data_service.ListRagCorporaResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vertex_rag_data_service.ListRagCorporaResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_rag_corpora(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*}/ragCorpora" + % client.transport._host, + args[1], + ) + + +def test_list_rag_corpora_rest_flattened_error(transport: str = "rest"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_rag_corpora( + vertex_rag_data_service.ListRagCorporaRequest(), + parent="parent_value", + ) + + +def test_list_rag_corpora_rest_pager(transport: str = "rest"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + ], + next_page_token="abc", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[], + next_page_token="def", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + ], + next_page_token="ghi", + ), + vertex_rag_data_service.ListRagCorporaResponse( + rag_corpora=[ + vertex_rag_data.RagCorpus(), + vertex_rag_data.RagCorpus(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + vertex_rag_data_service.ListRagCorporaResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"parent": "projects/sample1/locations/sample2"} + + pager = client.list_rag_corpora(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, vertex_rag_data.RagCorpus) for i in results) + + pages = list(client.list_rag_corpora(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.DeleteRagCorpusRequest, + dict, + ], +) +def test_delete_rag_corpus_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"} + 
request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_rag_corpus(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_delete_rag_corpus_rest_required_fields( + request_type=vertex_rag_data_service.DeleteRagCorpusRequest, +): + transport_class = transports.VertexRagDataServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_rag_corpus._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_rag_corpus._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set(("force",)) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                "uri": "v1/sample_method",
+                "method": "delete",
+                "query_params": pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode("UTF-8")
+            req.return_value = response_value
+
+            response = client.delete_rag_corpus(request)
+
+            expected_params = []
+            actual_params = req.call_args.kwargs["params"]
+            assert expected_params == actual_params
+
+
+def test_delete_rag_corpus_rest_unset_required_fields():
+    transport = transports.VertexRagDataServiceRestTransport(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    unset_fields = transport.delete_rag_corpus._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("force",)) & set(("name",)))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_rag_corpus_rest_interceptors(null_interceptor):
+    transport = transports.VertexRagDataServiceRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None
+        if null_interceptor
+        else transports.VertexRagDataServiceRestInterceptor(),
+    )
+    client = VertexRagDataServiceClient(transport=transport)
+    with mock.patch.object(
+        type(client.transport._session), "request"
+    ) as req, mock.patch.object(
+        path_template, "transcode"
+    ) as transcode, mock.patch.object(
+        operation.Operation, "_set_result_from_operation"
+    ), mock.patch.object(
+        transports.VertexRagDataServiceRestInterceptor, "post_delete_rag_corpus"
+    ) as post, mock.patch.object(
+        transports.VertexRagDataServiceRestInterceptor, "pre_delete_rag_corpus"
+    ) as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = vertex_rag_data_service.DeleteRagCorpusRequest.pb(
+            vertex_rag_data_service.DeleteRagCorpusRequest()
+        )
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(
+            operations_pb2.Operation()
+        )
+
+        request = vertex_rag_data_service.DeleteRagCorpusRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+
+        client.delete_rag_corpus(
+            request,
+            metadata=[
+                ("key", "val"),
+                ("cephalopod", "squid"),
+            ],
+        )
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_delete_rag_corpus_rest_bad_request(
+    transport: str = "rest", request_type=vertex_rag_data_service.DeleteRagCorpusRequest
+):
+    client = VertexRagDataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"name": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.delete_rag_corpus(request)
+
+
+def test_delete_rag_corpus_rest_flattened():
+    client = VertexRagDataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), "request") as req:
+        # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/ragCorpora/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_rag_corpus(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/ragCorpora/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_rag_corpus_rest_flattened_error(transport: str = "rest"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_rag_corpus( + vertex_rag_data_service.DeleteRagCorpusRequest(), + name="name_value", + ) + + +def test_delete_rag_corpus_rest_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.UploadRagFileRequest, + dict, + ], +) +def test_upload_rag_file_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data_service.UploadRagFileResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vertex_rag_data_service.UploadRagFileResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.upload_rag_file(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, vertex_rag_data_service.UploadRagFileResponse) + + +def test_upload_rag_file_rest_required_fields( + request_type=vertex_rag_data_service.UploadRagFileRequest, +): + transport_class = transports.VertexRagDataServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).upload_rag_file._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).upload_rag_file._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data_service.UploadRagFileResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                "uri": "v1/sample_method",
+                "method": "post",
+                "query_params": pb_request,
+            }
+            transcode_result["body"] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = vertex_rag_data_service.UploadRagFileResponse.pb(
+                return_value
+            )
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode("UTF-8")
+            req.return_value = response_value
+
+            response = client.upload_rag_file(request)
+
+            expected_params = []
+            actual_params = req.call_args.kwargs["params"]
+            assert expected_params == actual_params
+
+
+def test_upload_rag_file_rest_unset_required_fields():
+    transport = transports.VertexRagDataServiceRestTransport(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    unset_fields = transport.upload_rag_file._get_unset_required_fields({})
+    assert set(unset_fields) == (
+        set(())
+        & set(
+            (
+                "parent",
+                "ragFile",
+                "uploadRagFileConfig",
+            )
+        )
+    )
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_upload_rag_file_rest_interceptors(null_interceptor):
+    transport = transports.VertexRagDataServiceRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None
+        if null_interceptor
+        else transports.VertexRagDataServiceRestInterceptor(),
+    )
+    client = VertexRagDataServiceClient(transport=transport)
+    with mock.patch.object(
+        type(client.transport._session), "request"
+    ) as req, mock.patch.object(
+        path_template, "transcode"
+    ) as transcode, mock.patch.object(
+        transports.VertexRagDataServiceRestInterceptor, "post_upload_rag_file"
+    ) as post, mock.patch.object(
+        transports.VertexRagDataServiceRestInterceptor, "pre_upload_rag_file"
+    ) as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = vertex_rag_data_service.UploadRagFileRequest.pb(
+            vertex_rag_data_service.UploadRagFileRequest()
+        )
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = (
+            vertex_rag_data_service.UploadRagFileResponse.to_json(
+                vertex_rag_data_service.UploadRagFileResponse()
+            )
+        )
+
+        request = vertex_rag_data_service.UploadRagFileRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = vertex_rag_data_service.UploadRagFileResponse()
+
+        client.upload_rag_file(
+            request,
+            metadata=[
+                ("key", "val"),
+                ("cephalopod", "squid"),
+            ],
+        )
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_upload_rag_file_rest_bad_request(
+    transport: str = "rest", request_type=vertex_rag_data_service.UploadRagFileRequest
+):
+    client = VertexRagDataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.upload_rag_file(request)
+
+
+def test_upload_rag_file_rest_flattened():
+    client = VertexRagDataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data_service.UploadRagFileResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/locations/sample2/ragCorpora/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + rag_file=vertex_rag_data.RagFile( + gcs_source=io.GcsSource(uris=["uris_value"]) + ), + upload_rag_file_config=vertex_rag_data.UploadRagFileConfig( + rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig( + chunk_size=1075 + ) + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vertex_rag_data_service.UploadRagFileResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.upload_rag_file(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*/ragCorpora/*}/ragFiles:upload" + % client.transport._host, + args[1], + ) + + +def test_upload_rag_file_rest_flattened_error(transport: str = "rest"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.upload_rag_file( + vertex_rag_data_service.UploadRagFileRequest(), + parent="parent_value", + rag_file=vertex_rag_data.RagFile( + gcs_source=io.GcsSource(uris=["uris_value"]) + ), + upload_rag_file_config=vertex_rag_data.UploadRagFileConfig( + rag_file_chunking_config=vertex_rag_data.RagFileChunkingConfig( + chunk_size=1075 + ) + ), + ) + + +def test_upload_rag_file_rest_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.ImportRagFilesRequest, + dict, + ], +) +def test_import_rag_files_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.import_rag_files(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_import_rag_files_rest_required_fields( + request_type=vertex_rag_data_service.ImportRagFilesRequest, +): + transport_class = transports.VertexRagDataServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).import_rag_files._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).import_rag_files._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                "uri": "v1/sample_method",
+                "method": "post",
+                "query_params": pb_request,
+            }
+            transcode_result["body"] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode("UTF-8")
+            req.return_value = response_value
+
+            response = client.import_rag_files(request)
+
+            expected_params = []
+            actual_params = req.call_args.kwargs["params"]
+            assert expected_params == actual_params
+
+
+def test_import_rag_files_rest_unset_required_fields():
+    transport = transports.VertexRagDataServiceRestTransport(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    unset_fields = transport.import_rag_files._get_unset_required_fields({})
+    assert set(unset_fields) == (
+        set(())
+        & set(
+            (
+                "parent",
+                "importRagFilesConfig",
+            )
+        )
+    )
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_import_rag_files_rest_interceptors(null_interceptor):
+    transport = transports.VertexRagDataServiceRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None
+        if null_interceptor
+        else transports.VertexRagDataServiceRestInterceptor(),
+    )
+    client = VertexRagDataServiceClient(transport=transport)
+    with mock.patch.object(
+        type(client.transport._session), "request"
+    ) as req, mock.patch.object(
+        path_template, "transcode"
+    ) as transcode, mock.patch.object(
+        operation.Operation, "_set_result_from_operation"
+    ), mock.patch.object(
+        transports.VertexRagDataServiceRestInterceptor, "post_import_rag_files"
+    ) as post, mock.patch.object(
+        transports.VertexRagDataServiceRestInterceptor, "pre_import_rag_files"
+    ) as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = vertex_rag_data_service.ImportRagFilesRequest.pb(
+            vertex_rag_data_service.ImportRagFilesRequest()
+        )
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(
+            operations_pb2.Operation()
+        )
+
+        request = vertex_rag_data_service.ImportRagFilesRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+
+        client.import_rag_files(
+            request,
+            metadata=[
+                ("key", "val"),
+                ("cephalopod", "squid"),
+            ],
+        )
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_import_rag_files_rest_bad_request(
+    transport: str = "rest", request_type=vertex_rag_data_service.ImportRagFilesRequest
+):
+    client = VertexRagDataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, "request") as req, pytest.raises(
+        core_exceptions.BadRequest
+    ):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.import_rag_files(request)
+
+
+def test_import_rag_files_rest_flattened():
+    client = VertexRagDataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), "request") as req:
+        # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/locations/sample2/ragCorpora/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + import_rag_files_config=vertex_rag_data.ImportRagFilesConfig( + gcs_source=io.GcsSource(uris=["uris_value"]) + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.import_rag_files(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*/ragCorpora/*}/ragFiles:import" + % client.transport._host, + args[1], + ) + + +def test_import_rag_files_rest_flattened_error(transport: str = "rest"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.import_rag_files( + vertex_rag_data_service.ImportRagFilesRequest(), + parent="parent_value", + import_rag_files_config=vertex_rag_data.ImportRagFilesConfig( + gcs_source=io.GcsSource(uris=["uris_value"]) + ), + ) + + +def test_import_rag_files_rest_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.GetRagFileRequest, + dict, + ], +) +def test_get_rag_file_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data.RagFile( + name="name_value", + display_name="display_name_value", + description="description_value", + size_bytes=1089, + rag_file_type=vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vertex_rag_data.RagFile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.get_rag_file(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, vertex_rag_data.RagFile) + assert response.name == "name_value" + assert response.display_name == "display_name_value" + assert response.description == "description_value" + assert response.size_bytes == 1089 + assert ( + response.rag_file_type == vertex_rag_data.RagFile.RagFileType.RAG_FILE_TYPE_TXT + ) + + +def test_get_rag_file_rest_required_fields( + request_type=vertex_rag_data_service.GetRagFileRequest, +): + transport_class = transports.VertexRagDataServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_rag_file._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).get_rag_file._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data.RagFile() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = vertex_rag_data.RagFile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_rag_file(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_get_rag_file_rest_unset_required_fields(): + transport = transports.VertexRagDataServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.get_rag_file._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_rag_file_rest_interceptors(null_interceptor): + transport = transports.VertexRagDataServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.VertexRagDataServiceRestInterceptor(), + ) + client = VertexRagDataServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + 
transports.VertexRagDataServiceRestInterceptor, "post_get_rag_file" + ) as post, mock.patch.object( + transports.VertexRagDataServiceRestInterceptor, "pre_get_rag_file" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = vertex_rag_data_service.GetRagFileRequest.pb( + vertex_rag_data_service.GetRagFileRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = vertex_rag_data.RagFile.to_json( + vertex_rag_data.RagFile() + ) + + request = vertex_rag_data_service.GetRagFileRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = vertex_rag_data.RagFile() + + client.get_rag_file( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_rag_file_rest_bad_request( + transport: str = "rest", request_type=vertex_rag_data_service.GetRagFileRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_rag_file(request) + + +def test_get_rag_file_rest_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data.RagFile() + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vertex_rag_data.RagFile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.get_rag_file(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}" + % client.transport._host, + args[1], + ) + + +def test_get_rag_file_rest_flattened_error(transport: str = "rest"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_rag_file( + vertex_rag_data_service.GetRagFileRequest(), + name="name_value", + ) + + +def test_get_rag_file_rest_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.ListRagFilesRequest, + dict, + ], +) +def test_list_rag_files_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = vertex_rag_data_service.ListRagFilesResponse( + next_page_token="next_page_token_value", + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vertex_rag_data_service.ListRagFilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.list_rag_files(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListRagFilesPager) + assert response.next_page_token == "next_page_token_value" + + +def test_list_rag_files_rest_required_fields( + request_type=vertex_rag_data_service.ListRagFilesRequest, +): + transport_class = transports.VertexRagDataServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_rag_files._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).list_rag_files._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. 
+ assert not set(unset_fields) - set( + ( + "page_size", + "page_token", + ) + ) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = vertex_rag_data_service.ListRagFilesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "get", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = vertex_rag_data_service.ListRagFilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_rag_files(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_list_rag_files_rest_unset_required_fields(): + transport = transports.VertexRagDataServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.list_rag_files._get_unset_required_fields({}) + assert set(unset_fields) == ( + set( + ( + "pageSize", + "pageToken", + ) + ) + & set(("parent",)) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_rag_files_rest_interceptors(null_interceptor): + transport = transports.VertexRagDataServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.VertexRagDataServiceRestInterceptor(), + ) + client = VertexRagDataServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.VertexRagDataServiceRestInterceptor, "post_list_rag_files" + ) as post, mock.patch.object( + transports.VertexRagDataServiceRestInterceptor, "pre_list_rag_files" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = vertex_rag_data_service.ListRagFilesRequest.pb( + vertex_rag_data_service.ListRagFilesRequest() + ) + 
transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = ( + vertex_rag_data_service.ListRagFilesResponse.to_json( + vertex_rag_data_service.ListRagFilesResponse() + ) + ) + + request = vertex_rag_data_service.ListRagFilesRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = vertex_rag_data_service.ListRagFilesResponse() + + client.list_rag_files( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_rag_files_rest_bad_request( + transport: str = "rest", request_type=vertex_rag_data_service.ListRagFilesRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2/ragCorpora/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_rag_files(request) + + +def test_list_rag_files_rest_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = vertex_rag_data_service.ListRagFilesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/locations/sample2/ragCorpora/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vertex_rag_data_service.ListRagFilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.list_rag_files(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*/ragCorpora/*}/ragFiles" + % client.transport._host, + args[1], + ) + + +def test_list_rag_files_rest_flattened_error(transport: str = "rest"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_rag_files( + vertex_rag_data_service.ListRagFilesRequest(), + parent="parent_value", + ) + + +def test_list_rag_files_rest_pager(transport: str = "rest"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + ], + next_page_token="abc", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[], + next_page_token="def", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + ], + next_page_token="ghi", + ), + vertex_rag_data_service.ListRagFilesResponse( + rag_files=[ + vertex_rag_data.RagFile(), + vertex_rag_data.RagFile(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + vertex_rag_data_service.ListRagFilesResponse.to_json(x) for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/locations/sample2/ragCorpora/sample3" + } + + pager = client.list_rag_files(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, vertex_rag_data.RagFile) for i in results) + + pages = list(client.list_rag_files(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_data_service.DeleteRagFileRequest, + dict, + ], +) +def test_delete_rag_file_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "name": 
"projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.delete_rag_file(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_delete_rag_file_rest_required_fields( + request_type=vertex_rag_data_service.DeleteRagFileRequest, +): + transport_class = transports.VertexRagDataServiceRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_rag_file._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = "name_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).delete_rag_file._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == "name_value" + + client = 
VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "delete", + "query_params": pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_rag_file(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_delete_rag_file_rest_unset_required_fields(): + transport = transports.VertexRagDataServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.delete_rag_file._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_rag_file_rest_interceptors(null_interceptor): + transport = transports.VertexRagDataServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.VertexRagDataServiceRestInterceptor(), + ) + 
client = VertexRagDataServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.VertexRagDataServiceRestInterceptor, "post_delete_rag_file" + ) as post, mock.patch.object( + transports.VertexRagDataServiceRestInterceptor, "pre_delete_rag_file" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = vertex_rag_data_service.DeleteRagFileRequest.pb( + vertex_rag_data_service.DeleteRagFileRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = vertex_rag_data_service.DeleteRagFileRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.delete_rag_file( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_delete_rag_file_rest_bad_request( + transport: str = "rest", request_type=vertex_rag_data_service.DeleteRagFileRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_rag_file(request) + + +def test_delete_rag_file_rest_flattened(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "name": "projects/sample1/locations/sample2/ragCorpora/sample3/ragFiles/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + name="name_value", + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.delete_rag_file(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{name=projects/*/locations/*/ragCorpora/*/ragFiles/*}" + % client.transport._host, + args[1], + ) + + +def test_delete_rag_file_rest_flattened_error(transport: str = "rest"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete_rag_file( + vertex_rag_data_service.DeleteRagFileRequest(), + name="name_value", + ) + + +def test_delete_rag_file_rest_error(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.VertexRagDataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.VertexRagDataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VertexRagDataServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.VertexRagDataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = VertexRagDataServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = VertexRagDataServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.VertexRagDataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VertexRagDataServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.VertexRagDataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = VertexRagDataServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.VertexRagDataServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.VertexRagDataServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.VertexRagDataServiceGrpcTransport, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + transports.VertexRagDataServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = VertexRagDataServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. 
+ client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.VertexRagDataServiceGrpcTransport, + ) + + +def test_vertex_rag_data_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.VertexRagDataServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_vertex_rag_data_service_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.transports.VertexRagDataServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.VertexRagDataServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "create_rag_corpus", + "get_rag_corpus", + "list_rag_corpora", + "delete_rag_corpus", + "upload_rag_file", + "import_rag_files", + "get_rag_file", + "list_rag_files", + "delete_rag_file", + "set_iam_policy", + "get_iam_policy", + "test_iam_permissions", + "get_location", + "list_locations", + "get_operation", + "wait_operation", + "cancel_operation", + "delete_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_vertex_rag_data_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.transports.VertexRagDataServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VertexRagDataServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_vertex_rag_data_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.vertex_rag_data_service.transports.VertexRagDataServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VertexRagDataServiceTransport() + adc.assert_called_once() + + +def test_vertex_rag_data_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + VertexRagDataServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.VertexRagDataServiceGrpcTransport, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + ], +) +def test_vertex_rag_data_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.VertexRagDataServiceGrpcTransport, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + transports.VertexRagDataServiceRestTransport, + ], +) +def test_vertex_rag_data_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.VertexRagDataServiceGrpcTransport, grpc_helpers), + (transports.VertexRagDataServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_vertex_rag_data_service_transport_create_channel( + transport_class, grpc_helpers +): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.VertexRagDataServiceGrpcTransport, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + ], +) +def test_vertex_rag_data_service_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_vertex_rag_data_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.VertexRagDataServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_vertex_rag_data_service_rest_lro_client(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_vertex_rag_data_service_host_no_port(transport_name): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_vertex_rag_data_service_host_with_port(transport_name): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com:8000" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_vertex_rag_data_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = VertexRagDataServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = VertexRagDataServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_rag_corpus._session + session2 = client2.transport.create_rag_corpus._session + assert session1 != session2 + session1 = client1.transport.get_rag_corpus._session + session2 = client2.transport.get_rag_corpus._session + assert session1 != session2 + session1 = client1.transport.list_rag_corpora._session + session2 = 
client2.transport.list_rag_corpora._session + assert session1 != session2 + session1 = client1.transport.delete_rag_corpus._session + session2 = client2.transport.delete_rag_corpus._session + assert session1 != session2 + session1 = client1.transport.upload_rag_file._session + session2 = client2.transport.upload_rag_file._session + assert session1 != session2 + session1 = client1.transport.import_rag_files._session + session2 = client2.transport.import_rag_files._session + assert session1 != session2 + session1 = client1.transport.get_rag_file._session + session2 = client2.transport.get_rag_file._session + assert session1 != session2 + session1 = client1.transport.list_rag_files._session + session2 = client2.transport.list_rag_files._session + assert session1 != session2 + session1 = client1.transport.delete_rag_file._session + session2 = client2.transport.delete_rag_file._session + assert session1 != session2 + + +def test_vertex_rag_data_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.VertexRagDataServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_vertex_rag_data_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.VertexRagDataServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.VertexRagDataServiceGrpcTransport, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + ], +) +def test_vertex_rag_data_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.VertexRagDataServiceGrpcTransport, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + ], +) +def test_vertex_rag_data_service_transport_channel_mtls_with_adc(transport_class): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_vertex_rag_data_service_grpc_lro_client(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_vertex_rag_data_service_grpc_lro_async_client(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_rag_corpus_path(): + project = "squid" + location = "clam" + rag_corpus = "whelk" + expected = "projects/{project}/locations/{location}/ragCorpora/{rag_corpus}".format( + project=project, + location=location, + rag_corpus=rag_corpus, + ) + actual = VertexRagDataServiceClient.rag_corpus_path(project, location, rag_corpus) + assert expected == actual + + +def test_parse_rag_corpus_path(): + expected = { + "project": "octopus", + "location": "oyster", + "rag_corpus": "nudibranch", + } + path = VertexRagDataServiceClient.rag_corpus_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VertexRagDataServiceClient.parse_rag_corpus_path(path) + assert expected == actual + + +def test_rag_file_path(): + project = "cuttlefish" + location = "mussel" + rag_corpus = "winkle" + rag_file = "nautilus" + expected = "projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}".format( + project=project, + location=location, + rag_corpus=rag_corpus, + rag_file=rag_file, + ) + actual = VertexRagDataServiceClient.rag_file_path( + project, location, rag_corpus, rag_file + ) + assert expected == actual + + +def test_parse_rag_file_path(): + expected = { + "project": "scallop", + "location": "abalone", + "rag_corpus": "squid", + "rag_file": "clam", + } + path = VertexRagDataServiceClient.rag_file_path(**expected) + + # Check that the path construction is reversible. + actual = VertexRagDataServiceClient.parse_rag_file_path(path) + assert expected == actual + + +def test_common_billing_account_path(): + billing_account = "whelk" + expected = "billingAccounts/{billing_account}".format( + billing_account=billing_account, + ) + actual = VertexRagDataServiceClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = VertexRagDataServiceClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = VertexRagDataServiceClient.parse_common_billing_account_path(path) + assert expected == actual + + +def test_common_folder_path(): + folder = "oyster" + expected = "folders/{folder}".format( + folder=folder, + ) + actual = VertexRagDataServiceClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = VertexRagDataServiceClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VertexRagDataServiceClient.parse_common_folder_path(path) + assert expected == actual + + +def test_common_organization_path(): + organization = "cuttlefish" + expected = "organizations/{organization}".format( + organization=organization, + ) + actual = VertexRagDataServiceClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = VertexRagDataServiceClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = VertexRagDataServiceClient.parse_common_organization_path(path) + assert expected == actual + + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format( + project=project, + ) + actual = VertexRagDataServiceClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = VertexRagDataServiceClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = VertexRagDataServiceClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = VertexRagDataServiceClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = VertexRagDataServiceClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VertexRagDataServiceClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.VertexRagDataServiceTransport, "_prep_wrapped_messages" + ) as prep: + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.VertexRagDataServiceTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = VertexRagDataServiceClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_get_location_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.GetLocationRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_location(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.GetLocationRequest, + dict, + ], +) +def test_get_location_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.Location() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_location(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_list_locations_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.ListLocationsRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({"name": "projects/sample1"}, request) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_locations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.ListLocationsRequest, + dict, + ], +) +def test_list_locations_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_locations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_get_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_set_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + +def test_test_iam_permissions_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + +def test_cancel_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.CancelOperationRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.DeleteOperationRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.DeleteOperationRequest, + dict, + ], +) +def test_delete_operation_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_operation(request) + + # Establish that the response is the type that we expect. + assert response is None + + +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.ListOperationsRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_wait_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.WaitOperationRequest +): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.WaitOperationRequest, + dict, + ], +) +def test_wait_operation_rest(request_type): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.wait_operation(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_delete_operation(transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_operation_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_delete_operation_from_dict(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_wait_operation(transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + response = client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_wait_operation(transport: str = "grpc_asyncio"): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.WaitOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_wait_operation_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_wait_operation_from_dict(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_list_locations_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_locations_from_dict(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) + + +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_get_location_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials() + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations/abc",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = VertexRagDataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations/abc",
+    ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+    client = VertexRagDataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = VertexRagDataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = VertexRagDataServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(
+            version=774,
+            etag=b"etag_blob",
+        )
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+    client = VertexRagDataServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        # Wrap the fake response so the mocked async stub can be awaited.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_set_iam_policy_from_dict(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +def test_get_iam_policy(transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_test_iam_permissions(transport: str = "grpc"): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = VertexRagDataServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = VertexRagDataServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (VertexRagDataServiceClient, transports.VertexRagDataServiceGrpcTransport), + ( + VertexRagDataServiceAsyncClient, + transports.VertexRagDataServiceGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + 
UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py new file mode 100644 index 0000000000..67a9b32b53 --- /dev/null +++ b/tests/unit/gapic/aiplatform_v1beta1/test_vertex_rag_service.py @@ -0,0 +1,4578 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from google.api_core import api_core_version +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.aiplatform_v1beta1.services.vertex_rag_service import ( + VertexRagServiceAsyncClient, +) +from google.cloud.aiplatform_v1beta1.services.vertex_rag_service import ( + VertexRagServiceClient, +) +from google.cloud.aiplatform_v1beta1.services.vertex_rag_service import transports +from google.cloud.aiplatform_v1beta1.types import vertex_rag_service +from google.cloud.location import locations_pb2 +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +# If default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert VertexRagServiceClient._get_default_mtls_endpoint(None) is None + assert ( + VertexRagServiceClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + VertexRagServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + VertexRagServiceClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + VertexRagServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + VertexRagServiceClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +def test__read_environment_variables(): + assert VertexRagServiceClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert VertexRagServiceClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert VertexRagServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + VertexRagServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert VertexRagServiceClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert VertexRagServiceClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert VertexRagServiceClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + VertexRagServiceClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert VertexRagServiceClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert VertexRagServiceClient._get_client_cert_source(None, False) is None + assert ( + VertexRagServiceClient._get_client_cert_source(mock_provided_cert_source, False) + is None + ) + assert ( + VertexRagServiceClient._get_client_cert_source(mock_provided_cert_source, True) + == mock_provided_cert_source + ) + + with mock.patch( + 
"google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + VertexRagServiceClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + VertexRagServiceClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + VertexRagServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagServiceClient), +) +@mock.patch.object( + VertexRagServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagServiceAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = VertexRagServiceClient._DEFAULT_UNIVERSE + default_endpoint = VertexRagServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = VertexRagServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + VertexRagServiceClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + VertexRagServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == VertexRagServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + VertexRagServiceClient._get_api_endpoint(None, None, default_universe, "auto") + == default_endpoint + ) + assert ( + VertexRagServiceClient._get_api_endpoint(None, None, default_universe, "always") + == VertexRagServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + VertexRagServiceClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == VertexRagServiceClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + VertexRagServiceClient._get_api_endpoint(None, None, 
mock_universe, "never") + == mock_endpoint + ) + assert ( + VertexRagServiceClient._get_api_endpoint(None, None, default_universe, "never") + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + VertexRagServiceClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + VertexRagServiceClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + VertexRagServiceClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + VertexRagServiceClient._get_universe_domain(None, None) + == VertexRagServiceClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + VertexRagServiceClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (VertexRagServiceClient, transports.VertexRagServiceGrpcTransport, "grpc"), + (VertexRagServiceClient, transports.VertexRagServiceRestTransport, "rest"), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. 
+ channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # TODO: This is needed to cater for older versions of google-auth + # Make this test unconditional once the minimum supported version of + # google-auth becomes 2.23.0 or higher. + google_auth_major, google_auth_minor = [ + int(part) for part in google.auth.__version__.split(".")[0:2] + ] + if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): + credentials = ga_credentials.AnonymousCredentials() + credentials._universe_domain = "foo.com" + # Test the case when there is a universe mismatch from the credentials. + client = client_class(transport=transport_class(credentials=credentials)) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. 
+ api_core_major, api_core_minor = [ + int(part) for part in api_core_version.__version__.split(".")[0:2] + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=ga_credentials.AnonymousCredentials(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test that ValueError is raised if universe_domain is provided via client options and credentials is None + with pytest.raises(ValueError): + client._compare_universes("foo.bar", None) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (VertexRagServiceClient, "grpc"), + (VertexRagServiceAsyncClient, "grpc_asyncio"), + (VertexRagServiceClient, "rest"), + ], +) +def test_vertex_rag_service_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.VertexRagServiceGrpcTransport, "grpc"), + (transports.VertexRagServiceGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.VertexRagServiceRestTransport, "rest"), + ], +) +def 
test_vertex_rag_service_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (VertexRagServiceClient, "grpc"), + (VertexRagServiceAsyncClient, "grpc_asyncio"), + (VertexRagServiceClient, "rest"), + ], +) +def test_vertex_rag_service_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +def test_vertex_rag_service_client_get_transport_class(): + transport = VertexRagServiceClient.get_transport_class() + available_transports = [ + transports.VertexRagServiceGrpcTransport, + transports.VertexRagServiceRestTransport, + ] + assert transport in available_transports + + 
transport = VertexRagServiceClient.get_transport_class("grpc") + assert transport == transports.VertexRagServiceGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (VertexRagServiceClient, transports.VertexRagServiceGrpcTransport, "grpc"), + ( + VertexRagServiceAsyncClient, + transports.VertexRagServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (VertexRagServiceClient, transports.VertexRagServiceRestTransport, "rest"), + ], +) +@mock.patch.object( + VertexRagServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagServiceClient), +) +@mock.patch.object( + VertexRagServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagServiceAsyncClient), +) +def test_vertex_rag_service_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(VertexRagServiceClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(VertexRagServiceClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. 
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + 
always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + VertexRagServiceClient, + transports.VertexRagServiceGrpcTransport, + "grpc", + "true", + ), + ( + VertexRagServiceAsyncClient, + transports.VertexRagServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + VertexRagServiceClient, + transports.VertexRagServiceGrpcTransport, + "grpc", + "false", + ), + ( + VertexRagServiceAsyncClient, + transports.VertexRagServiceGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ( + VertexRagServiceClient, + transports.VertexRagServiceRestTransport, + "rest", + "true", + ), + ( + VertexRagServiceClient, + transports.VertexRagServiceRestTransport, + "rest", + "false", + ), + ], +) +@mock.patch.object( + VertexRagServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagServiceClient), +) +@mock.patch.object( + VertexRagServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagServiceAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_vertex_rag_service_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [VertexRagServiceClient, VertexRagServiceAsyncClient] +) +@mock.patch.object( + VertexRagServiceClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(VertexRagServiceClient), +) +@mock.patch.object( + VertexRagServiceAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(VertexRagServiceAsyncClient), +) +def test_vertex_rag_service_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", [VertexRagServiceClient, VertexRagServiceAsyncClient] +) +@mock.patch.object( + VertexRagServiceClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagServiceClient), +) +@mock.patch.object( + VertexRagServiceAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(VertexRagServiceAsyncClient), +) +def test_vertex_rag_service_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = VertexRagServiceClient._DEFAULT_UNIVERSE + default_endpoint = VertexRagServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = VertexRagServiceClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=ga_credentials.AnonymousCredentials(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=ga_credentials.AnonymousCredentials()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. + options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + else: + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + assert client.api_endpoint == default_endpoint + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (VertexRagServiceClient, transports.VertexRagServiceGrpcTransport, "grpc"), + ( + VertexRagServiceAsyncClient, + transports.VertexRagServiceGrpcAsyncIOTransport, + "grpc_asyncio", + ), + (VertexRagServiceClient, transports.VertexRagServiceRestTransport, "rest"), + ], +) +def test_vertex_rag_service_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + VertexRagServiceClient, + transports.VertexRagServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + VertexRagServiceAsyncClient, + transports.VertexRagServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ( + VertexRagServiceClient, + transports.VertexRagServiceRestTransport, + "rest", + None, + ), + ], +) +def test_vertex_rag_service_client_client_options_credentials_file( + client_class, 
transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_vertex_rag_service_client_client_options_from_dict(): + with mock.patch( + "google.cloud.aiplatform_v1beta1.services.vertex_rag_service.transports.VertexRagServiceGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = VertexRagServiceClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + VertexRagServiceClient, + transports.VertexRagServiceGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + VertexRagServiceAsyncClient, + transports.VertexRagServiceGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_vertex_rag_service_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_service.RetrieveContextsRequest, + dict, + ], +) +def test_retrieve_contexts(request_type, transport: str = "grpc"): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we 
are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.retrieve_contexts), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_service.RetrieveContextsResponse() + response = client.retrieve_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = vertex_rag_service.RetrieveContextsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, vertex_rag_service.RetrieveContextsResponse) + + +def test_retrieve_contexts_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.retrieve_contexts), "__call__" + ) as call: + client.retrieve_contexts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_service.RetrieveContextsRequest() + + +def test_retrieve_contexts_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. 
+ request = vertex_rag_service.RetrieveContextsRequest( + parent="parent_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.retrieve_contexts), "__call__" + ) as call: + client.retrieve_contexts(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_service.RetrieveContextsRequest( + parent="parent_value", + ) + + +@pytest.mark.asyncio +async def test_retrieve_contexts_empty_call_async(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.retrieve_contexts), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_service.RetrieveContextsResponse() + ) + response = await client.retrieve_contexts() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == vertex_rag_service.RetrieveContextsRequest() + + +@pytest.mark.asyncio +async def test_retrieve_contexts_async( + transport: str = "grpc_asyncio", + request_type=vertex_rag_service.RetrieveContextsRequest, +): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.retrieve_contexts), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_service.RetrieveContextsResponse() + ) + response = await client.retrieve_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = vertex_rag_service.RetrieveContextsRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, vertex_rag_service.RetrieveContextsResponse) + + +@pytest.mark.asyncio +async def test_retrieve_contexts_async_from_dict(): + await test_retrieve_contexts_async(request_type=dict) + + +def test_retrieve_contexts_field_headers(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = vertex_rag_service.RetrieveContextsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.retrieve_contexts), "__call__" + ) as call: + call.return_value = vertex_rag_service.RetrieveContextsResponse() + client.retrieve_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_retrieve_contexts_field_headers_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = vertex_rag_service.RetrieveContextsRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.retrieve_contexts), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_service.RetrieveContextsResponse() + ) + await client.retrieve_contexts(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_retrieve_contexts_flattened(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.retrieve_contexts), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_service.RetrieveContextsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.retrieve_contexts( + parent="parent_value", + query=vertex_rag_service.RagQuery(text="text_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].query + mock_val = vertex_rag_service.RagQuery(text="text_value") + assert arg == mock_val + + +def test_retrieve_contexts_flattened_error(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.retrieve_contexts( + vertex_rag_service.RetrieveContextsRequest(), + parent="parent_value", + query=vertex_rag_service.RagQuery(text="text_value"), + ) + + +@pytest.mark.asyncio +async def test_retrieve_contexts_flattened_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.retrieve_contexts), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = vertex_rag_service.RetrieveContextsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + vertex_rag_service.RetrieveContextsResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.retrieve_contexts( + parent="parent_value", + query=vertex_rag_service.RagQuery(text="text_value"), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].query + mock_val = vertex_rag_service.RagQuery(text="text_value") + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_retrieve_contexts_flattened_error_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.retrieve_contexts( + vertex_rag_service.RetrieveContextsRequest(), + parent="parent_value", + query=vertex_rag_service.RagQuery(text="text_value"), + ) + + +@pytest.mark.parametrize( + "request_type", + [ + vertex_rag_service.RetrieveContextsRequest, + dict, + ], +) +def test_retrieve_contexts_rest(request_type): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = vertex_rag_service.RetrieveContextsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vertex_rag_service.RetrieveContextsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.retrieve_contexts(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, vertex_rag_service.RetrieveContextsResponse) + + +def test_retrieve_contexts_rest_required_fields( + request_type=vertex_rag_service.RetrieveContextsRequest, +): + transport_class = transports.VertexRagServiceRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).retrieve_contexts._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).retrieve_contexts._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = vertex_rag_service.RetrieveContextsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = vertex_rag_service.RetrieveContextsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.retrieve_contexts(request) + + expected_params = [] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_retrieve_contexts_rest_unset_required_fields(): + transport = transports.VertexRagServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.retrieve_contexts._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "query", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_retrieve_contexts_rest_interceptors(null_interceptor): + transport = transports.VertexRagServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.VertexRagServiceRestInterceptor(), + ) + client = VertexRagServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.VertexRagServiceRestInterceptor, "post_retrieve_contexts" + ) as post, mock.patch.object( + transports.VertexRagServiceRestInterceptor, "pre_retrieve_contexts" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = vertex_rag_service.RetrieveContextsRequest.pb( + vertex_rag_service.RetrieveContextsRequest() + ) + 
transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = vertex_rag_service.RetrieveContextsResponse.to_json( + vertex_rag_service.RetrieveContextsResponse() + ) + + request = vertex_rag_service.RetrieveContextsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = vertex_rag_service.RetrieveContextsResponse() + + client.retrieve_contexts( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_retrieve_contexts_rest_bad_request( + transport: str = "rest", request_type=vertex_rag_service.RetrieveContextsRequest +): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.retrieve_contexts(request) + + +def test_retrieve_contexts_rest_flattened(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = vertex_rag_service.RetrieveContextsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {"parent": "projects/sample1/locations/sample2"} + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + query=vertex_rag_service.RagQuery(text="text_value"), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = vertex_rag_service.RetrieveContextsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.retrieve_contexts(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta1/{parent=projects/*/locations/*}:retrieveContexts" + % client.transport._host, + args[1], + ) + + +def test_retrieve_contexts_rest_flattened_error(transport: str = "rest"): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.retrieve_contexts( + vertex_rag_service.RetrieveContextsRequest(), + parent="parent_value", + query=vertex_rag_service.RagQuery(text="text_value"), + ) + + +def test_retrieve_contexts_rest_error(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.VertexRagServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.VertexRagServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VertexRagServiceClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.VertexRagServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = VertexRagServiceClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = VertexRagServiceClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.VertexRagServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VertexRagServiceClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.VertexRagServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = VertexRagServiceClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.VertexRagServiceGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.VertexRagServiceGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.VertexRagServiceGrpcTransport, + transports.VertexRagServiceGrpcAsyncIOTransport, + transports.VertexRagServiceRestTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "rest", + ], +) +def test_transport_kind(transport_name): + transport = VertexRagServiceClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.VertexRagServiceGrpcTransport, + ) + + +def test_vertex_rag_service_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.VertexRagServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_vertex_rag_service_base_transport(): + # Instantiate the base transport. 
+ with mock.patch( + "google.cloud.aiplatform_v1beta1.services.vertex_rag_service.transports.VertexRagServiceTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.VertexRagServiceTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + "retrieve_contexts", + "set_iam_policy", + "get_iam_policy", + "test_iam_permissions", + "get_location", + "list_locations", + "get_operation", + "wait_operation", + "cancel_operation", + "delete_operation", + "list_operations", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_vertex_rag_service_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.aiplatform_v1beta1.services.vertex_rag_service.transports.VertexRagServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VertexRagServiceTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_vertex_rag_service_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.aiplatform_v1beta1.services.vertex_rag_service.transports.VertexRagServiceTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VertexRagServiceTransport() + adc.assert_called_once() + + +def test_vertex_rag_service_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + VertexRagServiceClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.VertexRagServiceGrpcTransport, + transports.VertexRagServiceGrpcAsyncIOTransport, + ], +) +def test_vertex_rag_service_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.VertexRagServiceGrpcTransport, + transports.VertexRagServiceGrpcAsyncIOTransport, + transports.VertexRagServiceRestTransport, + ], +) +def test_vertex_rag_service_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.VertexRagServiceGrpcTransport, grpc_helpers), + (transports.VertexRagServiceGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_vertex_rag_service_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "aiplatform.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="aiplatform.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.VertexRagServiceGrpcTransport, + transports.VertexRagServiceGrpcAsyncIOTransport, + ], +) +def test_vertex_rag_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + +def test_vertex_rag_service_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ) as mock_configure_mtls_channel: + transports.VertexRagServiceRestTransport( + credentials=cred, client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_vertex_rag_service_host_no_port(transport_name): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:443" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com" + ) + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + "grpc_asyncio", + "rest", + ], +) +def test_vertex_rag_service_host_with_port(transport_name): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions( + api_endpoint="aiplatform.googleapis.com:8000" + ), + transport=transport_name, + ) + assert client.transport._host == ( + "aiplatform.googleapis.com:8000" + if transport_name in ["grpc", "grpc_asyncio"] + else "https://aiplatform.googleapis.com:8000" + ) + + 
+@pytest.mark.parametrize( + "transport_name", + [ + "rest", + ], +) +def test_vertex_rag_service_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = VertexRagServiceClient( + credentials=creds1, + transport=transport_name, + ) + client2 = VertexRagServiceClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.retrieve_contexts._session + session2 = client2.transport.retrieve_contexts._session + assert session1 != session2 + + +def test_vertex_rag_service_grpc_transport_channel(): + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.VertexRagServiceGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_vertex_rag_service_grpc_asyncio_transport_channel(): + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.VertexRagServiceGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
+@pytest.mark.parametrize( + "transport_class", + [ + transports.VertexRagServiceGrpcTransport, + transports.VertexRagServiceGrpcAsyncIOTransport, + ], +) +def test_vertex_rag_service_transport_channel_mtls_with_client_cert_source( + transport_class, +): + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel" + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.VertexRagServiceGrpcTransport,
        transports.VertexRagServiceGrpcAsyncIOTransport,
    ],
)
def test_vertex_rag_service_transport_channel_mtls_with_adc(transport_class):
    """With ``client_cert_source=None``, mTLS SSL credentials must come from ADC
    (``google.auth.transport.grpc.SslCredentials``) instead of a client callback,
    and the deprecated kwargs must still emit a DeprecationWarning.
    """
    mock_ssl_cred = mock.Mock()
    # Patch SslCredentials so its __init__ is a no-op and its ssl_credentials
    # property yields our sentinel — the transport should pick this up via ADC.
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()

            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )

            # Channel targets the mTLS endpoint with the ADC-derived sentinel creds.
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel


def test_common_billing_account_path():
    """common_billing_account_path formats the canonical resource-name template."""
    billing_account = "squid"
    expected = "billingAccounts/{billing_account}".format(
        billing_account=billing_account,
    )
    actual = VertexRagServiceClient.common_billing_account_path(billing_account)
    assert expected == actual


def test_parse_common_billing_account_path():
    """parse_common_billing_account_path is the inverse of the path builder."""
    expected = {
        "billing_account": "clam",
    }
    path = VertexRagServiceClient.common_billing_account_path(**expected)

    # Check that the path construction is reversible.
    actual = VertexRagServiceClient.parse_common_billing_account_path(path)
    assert expected == actual


# The following tests exercise each common_*_path builder forward and through its
# parse_* inverse, verifying the resource-name templates round-trip.
def test_common_folder_path():
    """common_folder_path formats the canonical folders/{folder} template."""
    folder = "whelk"
    expected = "folders/{folder}".format(
        folder=folder,
    )
    actual = VertexRagServiceClient.common_folder_path(folder)
    assert expected == actual


def test_parse_common_folder_path():
    """parse_common_folder_path is the inverse of common_folder_path."""
    expected = {
        "folder": "octopus",
    }
    path = VertexRagServiceClient.common_folder_path(**expected)

    # Check that the path construction is reversible.
    actual = VertexRagServiceClient.parse_common_folder_path(path)
    assert expected == actual


def test_common_organization_path():
    """common_organization_path formats the organizations/{organization} template."""
    organization = "oyster"
    expected = "organizations/{organization}".format(
        organization=organization,
    )
    actual = VertexRagServiceClient.common_organization_path(organization)
    assert expected == actual


def test_parse_common_organization_path():
    """parse_common_organization_path is the inverse of common_organization_path."""
    expected = {
        "organization": "nudibranch",
    }
    path = VertexRagServiceClient.common_organization_path(**expected)

    # Check that the path construction is reversible.
    actual = VertexRagServiceClient.parse_common_organization_path(path)
    assert expected == actual


def test_common_project_path():
    """common_project_path formats the projects/{project} template."""
    project = "cuttlefish"
    expected = "projects/{project}".format(
        project=project,
    )
    actual = VertexRagServiceClient.common_project_path(project)
    assert expected == actual


def test_parse_common_project_path():
    """parse_common_project_path is the inverse of common_project_path."""
    expected = {
        "project": "mussel",
    }
    path = VertexRagServiceClient.common_project_path(**expected)

    # Check that the path construction is reversible.
    actual = VertexRagServiceClient.parse_common_project_path(path)
    assert expected == actual


def test_common_location_path():
    """common_location_path formats the projects/{project}/locations/{location} template."""
    project = "winkle"
    location = "nautilus"
    expected = "projects/{project}/locations/{location}".format(
        project=project,
        location=location,
    )
    actual = VertexRagServiceClient.common_location_path(project, location)
    assert expected == actual


def test_parse_common_location_path():
    """parse_common_location_path is the inverse of common_location_path."""
    expected = {
        "project": "scallop",
        "location": "abalone",
    }
    path = VertexRagServiceClient.common_location_path(**expected)

    # Check that the path construction is reversible.
    actual = VertexRagServiceClient.parse_common_location_path(path)
    assert expected == actual


def test_client_with_default_client_info():
    """A caller-supplied ClientInfo reaches ``_prep_wrapped_messages`` both when a
    client is constructed and when a transport is constructed directly.
    """
    client_info = gapic_v1.client_info.ClientInfo()

    # Client construction path.
    with mock.patch.object(
        transports.VertexRagServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        client = VertexRagServiceClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)

    # Direct transport construction path.
    with mock.patch.object(
        transports.VertexRagServiceTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = VertexRagServiceClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)


@pytest.mark.asyncio
async def test_transport_close_async():
    """Exiting the async client's context manager closes the gRPC channel once."""
    client = VertexRagServiceAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            # Channel stays open while inside the context.
            close.assert_not_called()
        close.assert_called_once()


def test_get_location_rest_bad_request(
    transport: str = "rest", request_type=locations_pb2.GetLocationRequest
):
    client = VertexRagServiceClient(
        credentials=ga_credentials.AnonymousCredentials(),
transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_location(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.GetLocationRequest, + dict, + ], +) +def test_get_location_rest(request_type): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.Location() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_location(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) + + +def test_list_locations_rest_bad_request( + transport: str = "rest", request_type=locations_pb2.ListLocationsRequest +): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict({"name": "projects/sample1"}, request) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_locations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + locations_pb2.ListLocationsRequest, + dict, + ], +) +def test_list_locations_rest(request_type): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = locations_pb2.ListLocationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_locations(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_get_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest +): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.GetIamPolicyRequest, + dict, + ], +) +def test_get_iam_policy_rest(request_type): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + +def test_set_iam_policy_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest +): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.SetIamPolicyRequest, + dict, + ], +) +def test_set_iam_policy_rest(request_type): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + + +def test_test_iam_permissions_rest_bad_request( + transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest +): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"resource": "projects/sample1/locations/sample2/featurestores/sample3"}, + request, + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +@pytest.mark.parametrize( + "request_type", + [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, + ], +) +def test_test_iam_permissions_rest(request_type): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = { + "resource": "projects/sample1/locations/sample2/featurestores/sample3" + } + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + +def test_cancel_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.CancelOperationRequest +): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.cancel_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.CancelOperationRequest, + dict, + ], +) +def test_cancel_operation_rest(request_type): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.cancel_operation(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.DeleteOperationRequest +): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.DeleteOperationRequest, + dict, + ], +) +def test_delete_operation_rest(request_type): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = "{}" + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.delete_operation(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_get_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.GetOperationRequest +): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.GetOperationRequest, + dict, + ], +) +def test_get_operation_rest(request_type): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.get_operation(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +def test_list_operations_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.ListOperationsRequest +): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_operations(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.ListOperationsRequest, + dict, + ], +) +def test_list_operations_rest(request_type): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.ListOperationsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.list_operations(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_wait_operation_rest_bad_request( + transport: str = "rest", request_type=operations_pb2.WaitOperationRequest +): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + request = request_type() + request = json_format.ParseDict( + {"name": "projects/sample1/locations/sample2/operations/sample3"}, request + ) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait_operation(request) + + +@pytest.mark.parametrize( + "request_type", + [ + operations_pb2.WaitOperationRequest, + dict, + ], +) +def test_wait_operation_rest(request_type): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request_init = {"name": "projects/sample1/locations/sample2/operations/sample3"} + request = request_type(**request_init) + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.wait_operation(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, operations_pb2.Operation) + + +def test_delete_operation(transport: str = "grpc"): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_operation_async(transport: str = "grpc_asyncio"): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.DeleteOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_operation_field_headers(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = None + + client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_delete_operation_field_headers_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.DeleteOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_delete_operation_from_dict(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_delete_operation_from_dict_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_cancel_operation(transport: str = "grpc"): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = "grpc_asyncio"): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.CancelOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_field_headers(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = None + + client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.CancelOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_cancel_operation_from_dict(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = None + + response = client.cancel_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_cancel_operation_from_dict_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call: + # Designate an appropriate return value for the call. 
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        response = await client.cancel_operation(
+            request={
+                "name": "locations",
+            }
+        )
+        call.assert_called()
+
+
+def test_wait_operation(transport: str = "grpc"):
+    client = VertexRagServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation()
+        response = client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, operations_pb2.Operation)
+
+
+# NOTE(review): renamed from `test_wait_operation` — the async variant previously
+# reused the sync test's name, shadowing it so the sync test was never collected.
+# This also matches the `*_async` naming used by every other test in this suite.
+@pytest.mark.asyncio
+async def test_wait_operation_async(transport: str = "grpc_asyncio"):
+    client = VertexRagServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = operations_pb2.WaitOperationRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.wait_operation), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation()
+        )
+        response = await client.wait_operation(request)
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_wait_operation_field_headers(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_wait_operation_field_headers_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.WaitOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.wait_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_wait_operation_from_dict(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_wait_operation_from_dict_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.wait_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.wait_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_operation(transport: str = "grpc"): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + response = client.get_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +@pytest.mark.asyncio +async def test_get_operation_async(transport: str = "grpc_asyncio"): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.GetOperationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.Operation) + + +def test_get_operation_field_headers(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = operations_pb2.Operation() + + client.get_operation(request) + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_operation_field_headers_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.GetOperationRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + await client.get_operation(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_get_operation_from_dict(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation() + + response = client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_operation_from_dict_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation() + ) + response = await client.get_operation( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_operations(transport: str = "grpc"): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + response = client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +@pytest.mark.asyncio +async def test_list_operations_async(transport: str = "grpc_asyncio"): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = operations_pb2.ListOperationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, operations_pb2.ListOperationsResponse) + + +def test_list_operations_field_headers(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = operations_pb2.ListOperationsResponse() + + client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_operations_field_headers_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = operations_pb2.ListOperationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + await client.list_operations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_operations_from_dict(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.ListOperationsResponse() + + response = client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_operations_from_dict_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_operations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.ListOperationsResponse() + ) + response = await client.list_operations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_list_locations(transport: str = "grpc"): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.ListLocationsResponse() + response = client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +@pytest.mark.asyncio +async def test_list_locations_async(transport: str = "grpc_asyncio"): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.ListLocationsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.ListLocationsResponse) + + +def test_list_locations_field_headers(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = locations_pb2.ListLocationsResponse() + + client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_list_locations_field_headers_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.ListLocationsRequest() + request.name = "locations" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + await client.list_locations(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=locations", + ) in kw["metadata"] + + +def test_list_locations_from_dict(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = locations_pb2.ListLocationsResponse() + + response = client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_list_locations_from_dict_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_locations), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.ListLocationsResponse() + ) + response = await client.list_locations( + request={ + "name": "locations", + } + ) + call.assert_called() + + +def test_get_location(transport: str = "grpc"): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = locations_pb2.Location() + response = client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, locations_pb2.Location) + + +@pytest.mark.asyncio +async def test_get_location_async(transport: str = "grpc_asyncio"): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = locations_pb2.GetLocationRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + locations_pb2.Location() + ) + response = await client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, locations_pb2.Location) + + +def test_get_location_field_headers(): + client = VertexRagServiceClient(credentials=ga_credentials.AnonymousCredentials()) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = locations_pb2.GetLocationRequest() + request.name = "locations/abc" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_location), "__call__") as call: + call.return_value = locations_pb2.Location() + + client.get_location(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations/abc",
+    ) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_location_field_headers_async():
+    client = VertexRagServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials()
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = locations_pb2.GetLocationRequest()
+    request.name = "locations/abc"
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        await client.get_location(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        "x-goog-request-params",
+        "name=locations/abc",
+    ) in kw["metadata"]
+
+
+def test_get_location_from_dict():
+    client = VertexRagServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # Fix: patch `get_location` (the method under test), not `list_locations` —
+    # previously the wrong stub was mocked, so the request bypassed the mock and
+    # `call.assert_called()` asserted against a method that was never invoked.
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = locations_pb2.Location()
+
+        response = client.get_location(
+            request={
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+@pytest.mark.asyncio
+async def test_get_location_from_dict_async():
+    client = VertexRagServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    # Fix: patch `get_location`, not `list_locations` (same wrong-stub defect as
+    # the sync variant of this test).
+    with mock.patch.object(type(client.transport.get_location), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            locations_pb2.Location()
+        )
+        response = await client.get_location(
+            request={
+                # Fix: use a location-shaped resource name, consistent with the
+                # sync `test_get_location_from_dict` variant (was "locations").
+                "name": "locations/abc",
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy(transport: str = "grpc"):
+    client = VertexRagServiceClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(
+            version=774,
+            etag=b"etag_blob",
+        )
+        response = client.set_iam_policy(request)
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == request
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+
+    assert response.version == 774
+
+    assert response.etag == b"etag_blob"
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = "grpc_asyncio"):
+    client = VertexRagServiceAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + response = await client.set_iam_policy(request) + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_set_iam_policy_field_headers(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_set_iam_policy_from_dict(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_set_iam_policy_from_dict_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.set_iam_policy( + request={ + "resource": "resource_value", + "policy": policy_pb2.Policy(version=774), + } + ) + call.assert_called() + + +def test_get_iam_policy(transport: str = "grpc"): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + + response = client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +@pytest.mark.asyncio +async def test_get_iam_policy_async(transport: str = "grpc_asyncio"): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.GetIamPolicyRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + policy_pb2.Policy( + version=774, + etag=b"etag_blob", + ) + ) + + response = await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, policy_pb2.Policy) + + assert response.version == 774 + + assert response.etag == b"etag_blob" + + +def test_get_iam_policy_field_headers(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = policy_pb2.Policy() + + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_get_iam_policy_from_dict(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + + response = client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_get_iam_policy_from_dict_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + + response = await client.get_iam_policy( + request={ + "resource": "resource_value", + "options": options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_test_iam_permissions(transport: str = "grpc"): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + + response = client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async(transport: str = "grpc_asyncio"): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = iam_policy_pb2.TestIamPermissionsRequest() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse( + permissions=["permissions_value"], + ) + ) + + response = await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + + assert response.permissions == ["permissions_value"] + + +def test_test_iam_permissions_field_headers(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = iam_policy_pb2.TestIamPermissionsRequest() + request.resource = "resource/value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "resource=resource/value", + ) in kw["metadata"] + + +def test_test_iam_permissions_from_dict(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + + response = client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +@pytest.mark.asyncio +async def test_test_iam_permissions_from_dict_async(): + client = VertexRagServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + iam_policy_pb2.TestIamPermissionsResponse() + ) + + response = await client.test_iam_permissions( + request={ + "resource": "resource_value", + "permissions": ["permissions_value"], + } + ) + call.assert_called() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "rest", + "grpc", + ] + for transport in transports: + client = VertexRagServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (VertexRagServiceClient, transports.VertexRagServiceGrpcTransport), + (VertexRagServiceAsyncClient, transports.VertexRagServiceGrpcAsyncIOTransport), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), + 
scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/tests/unit/vertexai/test_evaluation.py b/tests/unit/vertexai/test_evaluation.py new file mode 100644 index 0000000000..c330506792 --- /dev/null +++ b/tests/unit/vertexai/test_evaluation.py @@ -0,0 +1,440 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from unittest import mock + +from google.cloud import aiplatform +import vertexai +from google.cloud.aiplatform import initializer +from google.cloud.aiplatform.metadata import metadata +from google.cloud.aiplatform_v1beta1.services import ( + evaluation_service as gapic_evaluation_services, +) +from google.cloud.aiplatform_v1beta1.types import ( + evaluation_service as gapic_evaluation_service_types, +) +from vertexai.preview import evaluation +from vertexai.preview.evaluation import utils +import pandas as pd +import pytest + + +_TEST_PROJECT = "test-project" +_TEST_LOCATION = "us-central1" +_TEST_METRICS = [ + "exact_match", + "bleu", + "rouge_1", + "rouge_2", + "rouge_l", + "rouge_l_sum", + "coherence", + "fluency", + "safety", + "groundedness", + "fulfillment", + "summarization_quality", + "summarization_helpfulness", + "summarization_verbosity", + "question_answering_quality", + "question_answering_relevance", + "question_answering_helpfulness", + "question_answering_correctness", +] +_TEST_EVAL_DATASET = pd.DataFrame( + { + "response": ["test", "text"], + "reference": ["test", "ref"], + "context": ["test", "context"], + "instruction": ["test", "instruction"], + } +) +_TEST_EVAL_DATASET_WITHOUT_RESPONSE = pd.DataFrame( + { + "reference": ["test", "ref"], + "context": ["test", "context"], + "instruction": ["test", "instruction"], + } +) + +_TEST_JSONL_FILE_CONTENT = """{"prompt": "prompt", "reference": "reference"}\n +{"prompt":"test", "reference": "test"}\n +""" +_TEST_CSV_FILE_CONTENT = """reference,context,instruction\ntest,test,test\n +text,text,text\n +""" + + +_MOCK_EXACT_MATCH_RESULT = [ + gapic_evaluation_service_types.EvaluateInstancesResponse( + exact_match_results=gapic_evaluation_service_types.ExactMatchResults( + exact_match_metric_values=[ + gapic_evaluation_service_types.ExactMatchMetricValue(score=1.0), + ] + ) + ), + gapic_evaluation_service_types.EvaluateInstancesResponse( + 
exact_match_results=gapic_evaluation_service_types.ExactMatchResults( + exact_match_metric_values=[ + gapic_evaluation_service_types.ExactMatchMetricValue(score=0.0), + ] + ) + ), +] + +_MOCK_FLUENCY_RESULT = [ + gapic_evaluation_service_types.EvaluateInstancesResponse( + fluency_result=gapic_evaluation_service_types.FluencyResult( + score=5, explanation="explanation", confidence=1.0 + ) + ), + gapic_evaluation_service_types.EvaluateInstancesResponse( + fluency_result=gapic_evaluation_service_types.FluencyResult( + score=4, explanation="explanation", confidence=0.5 + ) + ), +] + + +@pytest.fixture +def mock_async_event_loop(): + with mock.patch("asyncio.get_event_loop") as mock_async_event_loop: + yield mock_async_event_loop + + +@pytest.fixture +def mock_experiment_tracker(): + with mock.patch.object( + metadata, "_experiment_tracker", autospec=True + ) as mock_experiment_tracker: + yield mock_experiment_tracker + + +@pytest.mark.usefixtures("google_auth_mock") +class TestEvaluation: + def setup_method(self): + vertexai.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + ) + + def teardown_method(self): + initializer.global_pool.shutdown(wait=True) + + def test_create_eval_task(self): + test_experiment = "test_experiment_name" + test_content_column_name = "test_content_column_name" + test_reference_column_name = "test_reference_column_name" + test_response_column_name = "test_response_column_name" + + test_eval_task = evaluation.EvalTask( + dataset=_TEST_EVAL_DATASET, + metrics=_TEST_METRICS, + experiment=test_experiment, + content_column_name=test_content_column_name, + reference_column_name=test_reference_column_name, + response_column_name=test_response_column_name, + ) + + assert test_eval_task.dataset.equals(_TEST_EVAL_DATASET) + assert test_eval_task.metrics == _TEST_METRICS + assert test_eval_task.experiment == test_experiment + assert test_eval_task.content_column_name == test_content_column_name + assert test_eval_task.reference_column_name == 
test_reference_column_name + assert test_eval_task.response_column_name == test_response_column_name + + def test_evaluate_saved_response(self, mock_async_event_loop): + eval_dataset = _TEST_EVAL_DATASET + test_metrics = _TEST_METRICS + mock_summary_metrics = { + "row_count": 2, + "mock_metric/mean": 0.5, + "mock_metric/std": 0.5, + } + mock_metrics_table = pd.DataFrame( + { + "response": ["test", "text"], + "reference": ["test", "ref"], + "mock_metric": [1.0, 0.0], + } + ) + mock_async_event_loop.return_value.run_until_complete.return_value = ( + mock_summary_metrics, + mock_metrics_table, + ) + + test_eval_task = evaluation.EvalTask(dataset=eval_dataset, metrics=test_metrics) + test_result = test_eval_task.evaluate() + + assert test_result.summary_metrics == mock_summary_metrics + assert test_result.metrics_table.equals(mock_metrics_table) + + @pytest.mark.parametrize("api_transport", ["grpc", "rest"]) + def test_compute_automatic_metrics(self, api_transport): + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + api_transport=api_transport, + ) + eval_dataset = pd.DataFrame( + { + "response": ["test", "text"], + "reference": ["test", "ref"], + } + ) + test_metrics = ["exact_match"] + test_eval_task = evaluation.EvalTask(dataset=eval_dataset, metrics=test_metrics) + mock_metric_results = _MOCK_EXACT_MATCH_RESULT + with mock.patch.object( + target=gapic_evaluation_services.EvaluationServiceAsyncClient, + attribute="evaluate_instances", + side_effect=mock_metric_results, + ): + test_result = test_eval_task.evaluate() + + assert test_result.summary_metrics["row_count"] == 2 + assert test_result.summary_metrics["exact_match/mean"] == 0.5 + assert test_result.summary_metrics["exact_match/std"] == pytest.approx(0.7, 0.1) + assert list(test_result.metrics_table.columns.values) == [ + "response", + "reference", + "exact_match", + ] + assert test_result.metrics_table[["response", "reference"]].equals(eval_dataset) + assert 
list(test_result.metrics_table["exact_match"].values) == [1.0, 0.0] + + @pytest.mark.parametrize("api_transport", ["grpc", "rest"]) + def test_compute_pointwise_metrics(self, api_transport): + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + api_transport=api_transport, + ) + eval_dataset = pd.DataFrame( + { + "response": ["test", "text"], + } + ) + test_metrics = ["fluency"] + test_eval_task = evaluation.EvalTask(dataset=eval_dataset, metrics=test_metrics) + mock_metric_results = _MOCK_FLUENCY_RESULT + with mock.patch.object( + target=gapic_evaluation_services.EvaluationServiceAsyncClient, + attribute="evaluate_instances", + side_effect=mock_metric_results, + ): + test_result = test_eval_task.evaluate() + + assert test_result.summary_metrics["row_count"] == 2 + assert test_result.summary_metrics["fluency/mean"] == 4.5 + assert test_result.summary_metrics["fluency/std"] == pytest.approx(0.7, 0.1) + assert set(test_result.metrics_table.columns.values) == set( + [ + "response", + "fluency", + "fluency/explanation", + "fluency/confidence", + ] + ) + assert test_result.metrics_table[["response"]].equals(eval_dataset) + assert list(test_result.metrics_table["fluency"].values) == [5, 4] + assert list(test_result.metrics_table["fluency/explanation"].values) == [ + "explanation", + "explanation", + ] + assert list(test_result.metrics_table["fluency/confidence"].values) == [ + 1.0, + 0.5, + ] + + +@pytest.mark.usefixtures("google_auth_mock") +class TestEvaluationErrors: + def setup_method(self): + vertexai.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + ) + + def teardown_method(self): + initializer.global_pool.shutdown(wait=True) + + def test_evaluate_empty_metrics(self): + test_eval_task = evaluation.EvalTask(dataset=_TEST_EVAL_DATASET, metrics=[]) + with pytest.raises(ValueError, match="Metrics cannot be empty."): + test_eval_task.evaluate() + + def test_evaluate_invalid_metrics(self): + metric_name = "invalid_metric" + test_eval_task = 
evaluation.EvalTask( + dataset=_TEST_EVAL_DATASET, metrics=[metric_name] + ) + with pytest.raises( + ValueError, match=f"Metric name: {metric_name} not supported." + ): + test_eval_task.evaluate() + + def test_evaluate_invalid_experiment_run_name(self): + test_eval_task = evaluation.EvalTask( + dataset=_TEST_EVAL_DATASET, metrics=_TEST_METRICS + ) + with pytest.raises(ValueError, match="Experiment is not set"): + test_eval_task.evaluate(experiment_run_name="invalid_experiment_run_name") + + with pytest.raises(ValueError, match="Experiment is not set"): + test_eval_task.display_runs() + + def test_evaluate_experiment_name_already_exists(self, mock_experiment_tracker): + test_eval_task = evaluation.EvalTask( + dataset=_TEST_EVAL_DATASET, + metrics=_TEST_METRICS, + experiment="test_eval_experiment_name", + ) + mock_experiment_tracker.experiment_run.return_value = "experiment_run_1" + with pytest.raises(ValueError, match="Experiment run already exists"): + test_eval_task.evaluate(experiment_run_name="experiment_run_2") + + def test_evaluate_invalid_dataset_content_column(self): + test_eval_task = evaluation.EvalTask( + dataset=_TEST_EVAL_DATASET_WITHOUT_RESPONSE, + metrics=_TEST_METRICS, + ) + with pytest.raises(KeyError, match="Required column `content` not found"): + test_eval_task.evaluate(model=mock.MagicMock()) + + def test_evaluate_invalid_prompt_template_placeholder(self): + test_eval_task = evaluation.EvalTask( + dataset=_TEST_EVAL_DATASET_WITHOUT_RESPONSE, + metrics=_TEST_METRICS, + ) + with pytest.raises(ValueError, match="Failed to complete prompt template"): + test_eval_task.evaluate( + prompt_template="test_prompt_template {invalid_placeholder}", + ) + + +@pytest.mark.usefixtures("google_auth_mock") +class TestEvaluationUtils: + def setup_method(self): + vertexai.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + ) + + def teardown_method(self): + initializer.global_pool.shutdown(wait=True) + + def 
test_create_evaluation_service_async_client(self): + client = utils.create_evaluation_service_async_client() + assert isinstance(client, utils._EvaluationServiceAsyncClientWithOverride) + + def test_load_dataset_from_dataframe(self): + data = {"col1": [1, 2], "col2": ["a", "b"]} + df = pd.DataFrame(data) + loaded_df = utils.load_dataset(df) + assert loaded_df.equals(df) + + def test_load_dataset_from_dict(self): + data = {"col1": [1, 2], "col2": ["a", "b"]} + loaded_df = utils.load_dataset(data) + assert isinstance(loaded_df, pd.DataFrame) + assert loaded_df.to_dict("list") == data + + def test_load_dataset_from_gcs_jsonl(self): + source = "gs://test_bucket/test_file.jsonl" + with mock.patch.object( + utils, + "_read_gcs_file_contents", + return_value=_TEST_JSONL_FILE_CONTENT, + ): + loaded_df = utils.load_dataset(source) + + assert isinstance(loaded_df, pd.DataFrame) + assert loaded_df.to_dict("list") == { + "prompt": ["prompt", "test"], + "reference": ["reference", "test"], + } + + def test_load_dataset_from_gcs_csv(self): + source = "gs://test_bucket/test_file.csv" + with mock.patch.object( + utils, "_read_gcs_file_contents", return_value=_TEST_CSV_FILE_CONTENT + ): + loaded_df = utils.load_dataset(source) + + assert isinstance(loaded_df, pd.DataFrame) + assert loaded_df.to_dict("list") == { + "reference": ["test", "text"], + "context": ["test", "text"], + "instruction": ["test", "text"], + } + + def test_load_dataset_from_bigquery(self): + source = "bq://project-id.dataset.table_name" + with mock.patch.object( + utils, "_load_bigquery", return_value=_TEST_EVAL_DATASET + ): + loaded_df = utils.load_dataset(source) + + assert isinstance(loaded_df, pd.DataFrame) + assert loaded_df.equals(_TEST_EVAL_DATASET) + + +class TestPromptTemplate: + def test_init(self): + template_str = "Hello, {name}!" 
+ prompt_template = evaluation.PromptTemplate(template_str) + assert prompt_template.template == template_str + + def test_get_placeholders(self): + template_str = "Hello, {name}! Today is {day}." + prompt_template = evaluation.PromptTemplate(template_str) + assert prompt_template.placeholders == {"name", "day"} + + def test_format(self): + template_str = "Hello, {name}! Today is {day}." + prompt_template = evaluation.PromptTemplate(template_str) + completed_prompt = prompt_template.assemble(name="John", day="Monday") + assert str(completed_prompt) == "Hello, John! Today is Monday." + + def test_format_missing_placeholder(self): + template_str = "Hello, {name}!" + prompt_template = evaluation.PromptTemplate(template_str) + completed_prompt = prompt_template.assemble() + assert str(completed_prompt) == "Hello, {name}!" + assert prompt_template.placeholders == {"name"} + + def test_partial_format(self): + template_str = "Hello, {name}! Today is {day}." + prompt_template = evaluation.PromptTemplate(template_str) + partially_completed_prompt = prompt_template.assemble(name="John") + + assert isinstance(partially_completed_prompt, evaluation.PromptTemplate) + assert str(partially_completed_prompt) == "Hello, John! Today is {day}." + assert partially_completed_prompt.placeholders == {"day"} + + completed_prompt = partially_completed_prompt.assemble(day="Monday") + assert str(completed_prompt) == "Hello, John! Today is Monday." + + def test_str(self): + template_str = "Hello, world!" + prompt_template = evaluation.PromptTemplate(template_str) + assert str(prompt_template) == template_str + + def test_repr(self): + template_str = "Hello, {name}!" 
+ prompt_template = evaluation.PromptTemplate(template_str) + assert repr(prompt_template) == f"PromptTemplate('{template_str}')" diff --git a/tests/unit/vertexai/test_extensions.py b/tests/unit/vertexai/test_extensions.py new file mode 100644 index 0000000000..3bda374c3e --- /dev/null +++ b/tests/unit/vertexai/test_extensions.py @@ -0,0 +1,375 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import importlib +import json +from unittest import mock + +from google import auth +from google.api_core import operation as ga_operation +from google.auth import credentials as auth_credentials +from google.cloud import aiplatform +from google.cloud.aiplatform import initializer +from google.cloud.aiplatform import utils as aip_utils +from google.cloud.aiplatform_v1beta1 import types +from google.cloud.aiplatform_v1beta1.services import extension_execution_service +from google.cloud.aiplatform_v1beta1.services import extension_registry_service +from vertexai.preview import extensions +from vertexai.reasoning_engines import _utils +import pytest + + +_TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials()) +_TEST_AUTH_CONFIG = types.AuthConfig(auth_type="GOOGLE_SERVICE_ACCOUNT_AUTH") +_TEST_RESOURCE_ID = "1028944691210842416" +_TEST_OPEN_API_GCS_URI = "gs://vertex-extension-experiment/code_interpreter.yaml" +_TEST_OPEN_API_YAML = """ + openapi: 3.0.0 + info: + title: SomeApi + version: 1.0.0 + servers: + - url: 
https://www.someapi.com + paths: + /path1: + get: + summary: Request description + operationId: requestSomething + parameters: + - name: request_parameter + in: query + required: true + schema: + type: string + responses: + '200': + description: Response description + content: + application/json: + schema: + type: object + properties: + response_parameter: + type: string""" +_TEST_EXTENSION_MANIFEST_NAME = "code_interpreter_tool" +_TEST_EXTENSION_MANIFEST_DESCRIPTION = "Google Code Interpreter Extension" +_TEST_EXTENSION_MANIFEST_WITH_GCS_URI_OBJ = types.ExtensionManifest( + name=_TEST_EXTENSION_MANIFEST_NAME, + description=_TEST_EXTENSION_MANIFEST_DESCRIPTION, + api_spec=types.ExtensionManifest.ApiSpec( + open_api_gcs_uri=_TEST_OPEN_API_GCS_URI, + ), + auth_config=_TEST_AUTH_CONFIG, +) +_TEST_EXTENSION_MANIFEST_WITH_YAML_OBJ = types.ExtensionManifest( + name=_TEST_EXTENSION_MANIFEST_NAME, + description=_TEST_EXTENSION_MANIFEST_DESCRIPTION, + api_spec=types.ExtensionManifest.ApiSpec( + open_api_yaml=_TEST_OPEN_API_YAML, + ), + auth_config=_TEST_AUTH_CONFIG, +) +_TEST_EXTENSION_MANIFEST_WITH_NO_API_SPEC = types.ExtensionManifest( + name=_TEST_EXTENSION_MANIFEST_NAME, + description=_TEST_EXTENSION_MANIFEST_DESCRIPTION, + auth_config=_TEST_AUTH_CONFIG, +) +_TEST_LOCATION = "us-central1" +_TEST_PROJECT = "test-project" +_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}" +_TEST_EXTENSION_RESOURCE_NAME = f"{_TEST_PARENT}/extensions/{_TEST_RESOURCE_ID}" +_TEST_EXTENSION_DISPLAY_NAME = "Extension Display Name" +_TEST_EXTENSION_OBJ = types.Extension( + name=_TEST_EXTENSION_RESOURCE_NAME, + display_name=_TEST_EXTENSION_DISPLAY_NAME, + manifest=_TEST_EXTENSION_MANIFEST_WITH_GCS_URI_OBJ, +) +_TEST_EXTENSION_WITH_YAML_API_SPEC_OBJ = types.Extension( + name=_TEST_EXTENSION_RESOURCE_NAME, + display_name=_TEST_EXTENSION_DISPLAY_NAME, + manifest=_TEST_EXTENSION_MANIFEST_WITH_YAML_OBJ, +) +_TEST_EXTENSION_WITH_NO_API_SPEC_OBJ = types.Extension( + 
name=_TEST_EXTENSION_RESOURCE_NAME, + display_name=_TEST_EXTENSION_DISPLAY_NAME, + manifest=_TEST_EXTENSION_MANIFEST_WITH_NO_API_SPEC, +) +_TEST_EXTENSION_OPERATION_ID = "search" +_TEST_QUERY_PROMPT = "Find the first fibonacci number greater than 999" +_TEST_EXTENSION_OPERATION_PARAMS = {"query": _TEST_QUERY_PROMPT} +_TEST_RESPONSE_CONTENT = json.dumps( + { + "execution_error": "", + "execution_result": "The first fibonacci number greater than 999 is 1597\n", + "generated_code": "```python\n" + "def fibonacci(n):\n" + " a, b = 0, 1\n" + " for _ in range(n):\n" + " a, b = b, a + b\n" + " return a\n" + "\n" + "# Find the first fibonacci number greater than 999\n" + "n = 1\n" + "while fibonacci(n) <= 999:\n" + " n += 1\n" + "\n" + 'print(f"The first fibonacci number greater than 999 is ' + '{fibonacci(n)}")\n' + "```", + "output_files": [], + } +) +_TEST_EXECUTE_EXTENSION_RESPONSE = types.ExecuteExtensionResponse( + content=_TEST_RESPONSE_CONTENT, +) + + +@pytest.fixture(scope="module") +def google_auth_mock(): + with mock.patch.object(auth, "default") as google_auth_mock: + google_auth_mock.return_value = ( + auth_credentials.AnonymousCredentials(), + _TEST_PROJECT, + ) + yield google_auth_mock + + +@pytest.fixture +def get_extension_mock(): + with mock.patch.object( + extension_registry_service.ExtensionRegistryServiceClient, + "get_extension", + ) as get_extension_mock: + api_client_mock = mock.Mock( + spec=extension_registry_service.ExtensionRegistryServiceClient, + ) + api_client_mock.get_extension.return_value = _TEST_EXTENSION_OBJ + get_extension_mock.return_value = api_client_mock + yield get_extension_mock + + +@pytest.fixture +def create_extension_mock(): + with mock.patch.object( + extension_registry_service.ExtensionRegistryServiceClient, + "import_extension", + ) as create_extension_mock: + create_extension_lro_mock = mock.Mock(ga_operation.Operation) + create_extension_lro_mock.result.return_value = _TEST_EXTENSION_OBJ + 
create_extension_mock.return_value = create_extension_lro_mock + yield create_extension_mock + + +@pytest.fixture +def execute_extension_mock(): + with mock.patch.object( + extension_execution_service.ExtensionExecutionServiceClient, "execute_extension" + ) as execute_extension_mock: + response_mock = mock.MagicMock() + response_mock.content.return_value = _TEST_RESPONSE_CONTENT + api_client_mock = mock.MagicMock() + api_client_mock.execute_extension.return_value = response_mock + execute_extension_mock.return_value = api_client_mock + yield execute_extension_mock + + +@pytest.fixture +def delete_extension_mock(): + with mock.patch.object( + extension_registry_service.ExtensionRegistryServiceClient, + "delete_extension", + ) as delete_extension_mock: + delete_extension_lro_mock = mock.Mock(ga_operation.Operation) + delete_extension_lro_mock.result.return_value = None + delete_extension_mock.return_value = delete_extension_lro_mock + yield delete_extension_mock + + +@pytest.fixture +def to_dict_mock(): + with mock.patch.object(_utils, "to_dict") as to_dict_mock: + to_dict_mock.return_value = {} + yield to_dict_mock + + +@pytest.fixture +def load_yaml_mock(): + with mock.patch.object( + aip_utils.yaml_utils, + "load_yaml", + autospec=True, + ) as load_yaml_mock: + load_yaml_mock.return_value = lambda x: x + yield load_yaml_mock + + +@pytest.mark.usefixtures("google_auth_mock") +class TestExtension: + def setup_method(self): + importlib.reload(initializer) + importlib.reload(aiplatform) + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + credentials=_TEST_CREDENTIALS, + ) + + def teardown_method(self): + initializer.global_pool.shutdown(wait=True) + + def test_get_extension(self, get_extension_mock): + extensions.Extension(_TEST_RESOURCE_ID) + get_extension_mock.assert_called_once_with( + name=_TEST_EXTENSION_RESOURCE_NAME, + retry=aiplatform.base._DEFAULT_RETRY, + ) + + def test_create_extension( + self, + create_extension_mock, + 
get_extension_mock, + load_yaml_mock, + ): + extensions.Extension.create( + extension_name=_TEST_EXTENSION_RESOURCE_NAME, + display_name=_TEST_EXTENSION_DISPLAY_NAME, + manifest=_TEST_EXTENSION_MANIFEST_WITH_GCS_URI_OBJ, + ) + create_extension_mock.assert_called_once_with( + parent=_TEST_PARENT, + extension=_TEST_EXTENSION_OBJ, + ) + get_extension_mock.assert_called_once_with( + name=_TEST_EXTENSION_RESOURCE_NAME, + retry=aiplatform.base._DEFAULT_RETRY, + ) + + def test_delete_after_create_extension( + self, + create_extension_mock, + get_extension_mock, + delete_extension_mock, + load_yaml_mock, + ): + test_extension = extensions.Extension.create( + extension_name=_TEST_EXTENSION_RESOURCE_NAME, + display_name=_TEST_EXTENSION_DISPLAY_NAME, + manifest=_TEST_EXTENSION_MANIFEST_WITH_GCS_URI_OBJ, + ) + create_extension_mock.assert_called_once_with( + parent=_TEST_PARENT, + extension=_TEST_EXTENSION_OBJ, + ) + get_extension_mock.assert_any_call( + name=_TEST_EXTENSION_RESOURCE_NAME, + retry=aiplatform.base._DEFAULT_RETRY, + ) + # Manually set _gca_resource here to prevent the mocks from propagating. + test_extension._gca_resource = _TEST_EXTENSION_OBJ + test_extension.delete() + delete_extension_mock.assert_called_once_with( + name=test_extension.resource_name, + ) + + def test_delete_after_get_extension( + self, + get_extension_mock, + delete_extension_mock, + load_yaml_mock, + ): + test_extension = extensions.Extension(_TEST_RESOURCE_ID) + get_extension_mock.assert_any_call( + name=_TEST_EXTENSION_RESOURCE_NAME, + retry=aiplatform.base._DEFAULT_RETRY, + ) + # Manually set _gca_resource here to prevent the mocks from propagating. 
+        test_extension._gca_resource = _TEST_EXTENSION_OBJ
+        test_extension.delete()
+        delete_extension_mock.assert_called_once_with(
+            name=test_extension.resource_name,
+        )
+
+    def test_execute_extension(
+        self,
+        get_extension_mock,
+        execute_extension_mock,
+        load_yaml_mock,
+    ):
+        test_extension = extensions.Extension(_TEST_RESOURCE_ID)
+        get_extension_mock.assert_called_once_with(
+            name=_TEST_EXTENSION_RESOURCE_NAME,
+            retry=aiplatform.base._DEFAULT_RETRY,
+        )
+        # Manually set _gca_resource here to prevent the mocks from propagating.
+        test_extension._gca_resource = _TEST_EXTENSION_OBJ
+        test_extension.execute(
+            operation_id=_TEST_EXTENSION_OPERATION_ID,
+            operation_params=_TEST_EXTENSION_OPERATION_PARAMS,
+            runtime_auth_config=_TEST_AUTH_CONFIG,
+        )
+        execute_extension_mock.assert_called_once_with(
+            types.ExecuteExtensionRequest(
+                name=_TEST_EXTENSION_RESOURCE_NAME,
+                operation_id=_TEST_EXTENSION_OPERATION_ID,
+                operation_params=_utils.to_proto(
+                    _TEST_EXTENSION_OPERATION_PARAMS,
+                ),
+                runtime_auth_config=_TEST_AUTH_CONFIG,
+            ),
+        )
+
+    def test_api_spec_from_yaml(self, get_extension_mock, load_yaml_mock):
+        test_extension = extensions.Extension(_TEST_RESOURCE_ID)
+        get_extension_mock.assert_called_once_with(
+            name=_TEST_EXTENSION_RESOURCE_NAME,
+            retry=aiplatform.base._DEFAULT_RETRY,
+        )
+        # Manually set _gca_resource here to prevent the mocks from propagating.
+        test_extension._gca_resource = _TEST_EXTENSION_WITH_YAML_API_SPEC_OBJ
+        assert test_extension.api_spec() == {}
+
+    def test_no_api_spec(self, get_extension_mock, load_yaml_mock):
+        test_extension = extensions.Extension(_TEST_RESOURCE_ID)
+        get_extension_mock.assert_called_once_with(
+            name=_TEST_EXTENSION_RESOURCE_NAME,
+            retry=aiplatform.base._DEFAULT_RETRY,
+        )
+        # Manually set _gca_resource here to prevent the mocks from propagating.
+ test_extension._gca_resource = _TEST_EXTENSION_WITH_NO_API_SPEC_OBJ + test_extension.api_spec() == {} + + def test_api_spec_from_gcs_uri( + self, + get_extension_mock, + load_yaml_mock, + ): + test_extension = extensions.Extension(_TEST_RESOURCE_ID) + get_extension_mock.assert_called_once_with( + name=_TEST_EXTENSION_RESOURCE_NAME, + retry=aiplatform.base._DEFAULT_RETRY, + ) + # Manually set _gca_resource here to prevent the mocks from propagating. + test_extension._gca_resource = _TEST_EXTENSION_OBJ + test_extension.api_spec() + load_yaml_mock.assert_called_once_with(_TEST_OPEN_API_GCS_URI) + + def test_operation_schemas(self, get_extension_mock): + test_extension = extensions.Extension(_TEST_RESOURCE_ID) + get_extension_mock.assert_called_once_with( + name=_TEST_EXTENSION_RESOURCE_NAME, + retry=aiplatform.base._DEFAULT_RETRY, + ) + # Manually set _gca_resource here to prevent the mocks from propagating. + test_extension._gca_resource = _TEST_EXTENSION_OBJ + test_extension.operation_schemas() diff --git a/tests/unit/vertexai/test_generative_models.py b/tests/unit/vertexai/test_generative_models.py index 2ad703f1da..5ef9bdf77a 100644 --- a/tests/unit/vertexai/test_generative_models.py +++ b/tests/unit/vertexai/test_generative_models.py @@ -36,7 +36,9 @@ _TEST_PROJECT = "test-project" +_TEST_PROJECT2 = "test-project2" _TEST_LOCATION = "us-central1" +_TEST_LOCATION2 = "europe-west4" _RESPONSE_TEXT_PART_STRUCT = { @@ -283,6 +285,50 @@ def setup_method(self): def teardown_method(self): initializer.global_pool.shutdown(wait=True) + @mock.patch.object( + target=prediction_service.PredictionServiceClient, + attribute="generate_content", + new=mock_generate_content, + ) + @pytest.mark.parametrize( + "generative_models", + [generative_models, preview_generative_models], + ) + def test_generative_model_constructor_model_name( + self, generative_models: generative_models + ): + project_location_prefix = ( + f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}/" + ) + + 
model_name1 = "gemini-pro" + model1 = generative_models.GenerativeModel(model_name1) + assert ( + model1._prediction_resource_name + == project_location_prefix + "publishers/google/models/" + model_name1 + ) + + model_name2 = "models/gemini-pro" + model2 = generative_models.GenerativeModel(model_name2) + assert ( + model2._prediction_resource_name + == project_location_prefix + "publishers/google/" + model_name2 + ) + + model_name3 = "publishers/some_publisher/models/some_model" + model3 = generative_models.GenerativeModel(model_name3) + assert model3._prediction_resource_name == project_location_prefix + model_name3 + + model_name4 = ( + f"projects/{_TEST_PROJECT2}/locations/{_TEST_LOCATION2}/endpoints/endpoint1" + ) + model4 = generative_models.GenerativeModel(model_name4) + assert model4._prediction_resource_name == model_name4 + assert _TEST_LOCATION2 in model4._prediction_client._api_endpoint + + with pytest.raises(ValueError): + generative_models.GenerativeModel("foo/bar/models/gemini-pro") + @mock.patch.object( target=prediction_service.PredictionServiceClient, attribute="generate_content", @@ -297,7 +343,14 @@ def test_generate_content(self, generative_models: generative_models): response = model.generate_content("Why is sky blue?") assert response.text - response2 = model.generate_content( + model2 = generative_models.GenerativeModel( + "gemini-pro", + system_instruction=[ + "Talk like a pirate.", + "Don't use rude words.", + ], + ) + response2 = model2.generate_content( "Why is sky blue?", generation_config=generative_models.GenerationConfig( temperature=0.2, @@ -307,6 +360,18 @@ def test_generate_content(self, generative_models: generative_models): max_output_tokens=200, stop_sequences=["\n\n\n"], ), + safety_settings=[ + generative_models.SafetySetting( + category=generative_models.SafetySetting.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT, + threshold=generative_models.SafetySetting.HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE, + 
method=generative_models.SafetySetting.HarmBlockMethod.SEVERITY, + ), + generative_models.SafetySetting( + category=generative_models.SafetySetting.HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold=generative_models.SafetySetting.HarmBlockThreshold.BLOCK_ONLY_HIGH, + method=generative_models.SafetySetting.HarmBlockMethod.PROBABILITY, + ), + ], ) assert response2.text @@ -443,6 +508,67 @@ def test_chat_function_calling(self, generative_models: generative_models): assert "nice" in response2.text assert not response2.candidates[0].function_calls + @mock.patch.object( + target=prediction_service.PredictionServiceClient, + attribute="generate_content", + new=mock_generate_content, + ) + @pytest.mark.parametrize( + "generative_models", + [preview_generative_models], + ) + def test_chat_forced_function_calling(self, generative_models: generative_models): + get_current_weather_func = generative_models.FunctionDeclaration( + name="get_current_weather", + description="Get the current weather in a given location", + parameters=_REQUEST_FUNCTION_PARAMETER_SCHEMA_STRUCT, + ) + weather_tool = generative_models.Tool( + function_declarations=[get_current_weather_func], + ) + + tool_config = generative_models.ToolConfig( + function_calling_config=generative_models.ToolConfig.FunctionCallingConfig( + mode=generative_models.ToolConfig.FunctionCallingConfig.Mode.ANY, + allowed_function_names=["get_current_weather"], + ) + ) + + model = generative_models.GenerativeModel( + "gemini-pro", + # Specifying the tools once to avoid specifying them in every request + tools=[weather_tool], + tool_config=tool_config, + ) + chat = model.start_chat() + + response1 = chat.send_message("What is the weather like in Boston?") + assert ( + response1.candidates[0].content.parts[0].function_call.name + == "get_current_weather" + ) + assert [ + function_call.name + for function_call in response1.candidates[0].function_calls + ] == ["get_current_weather"] + function_map = { + "get_current_weather": 
get_current_weather, + } + function_response_parts = [] + for function_call in response1.candidates[0].function_calls: + function = function_map[function_call.name] + function_result = function(**function_call.args) + function_response_part = generative_models.Part.from_function_response( + name=function_call.name, + response=function_result, + ) + function_response_parts.append(function_response_part) + + response2 = chat.send_message(function_response_parts) + assert "Boston" in response2.text + assert "nice" in response2.text + assert not response2.candidates[0].function_calls + @mock.patch.object( target=prediction_service.PredictionServiceClient, attribute="generate_content", diff --git a/tests/unit/vertexai/test_reasoning_engines.py b/tests/unit/vertexai/test_reasoning_engines.py new file mode 100644 index 0000000000..ffa8c65613 --- /dev/null +++ b/tests/unit/vertexai/test_reasoning_engines.py @@ -0,0 +1,491 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import cloudpickle +import importlib +import sys +import tarfile +from absl.testing import parameterized +from typing import Optional +from unittest import mock + +from google import auth +from google.api_core import operation as ga_operation +from google.auth import credentials as auth_credentials +from google.cloud import storage +from google.cloud import aiplatform +from google.cloud.aiplatform import base +from google.cloud.aiplatform import initializer +from google.cloud.aiplatform_v1beta1 import types +from google.cloud.aiplatform_v1beta1.services import reasoning_engine_execution_service +from google.cloud.aiplatform_v1beta1.services import reasoning_engine_service +from vertexai.preview import reasoning_engines +from vertexai.reasoning_engines import _utils +from vertexai.reasoning_engines import _reasoning_engines +import pytest + + +class CapitalizeEngine: + """A sample Reasoning Engine.""" + + def set_up(self): + pass + + def query(self, unused_arbitrary_string_name: str) -> str: + """Runs the engine.""" + return unused_arbitrary_string_name.upper() + + +_TEST_RETRY = base._DEFAULT_RETRY +_TEST_CREDENTIALS = mock.Mock(spec=auth_credentials.AnonymousCredentials()) +_TEST_STAGING_BUCKET = "gs://test-bucket" +_TEST_LOCATION = "us-central1" +_TEST_PROJECT = "test-project" +_TEST_RESOURCE_ID = "1028944691210842416" +_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}" +_TEST_REASONING_ENGINE_RESOURCE_NAME = ( + f"{_TEST_PARENT}/reasoningEngines/{_TEST_RESOURCE_ID}" +) +_TEST_REASONING_ENGINE_DISPLAY_NAME = "Reasoning Engine Display Name" +_TEST_GCS_DIR_NAME = _reasoning_engines._DEFAULT_GCS_DIR_NAME +_TEST_BLOB_FILENAME = _reasoning_engines._BLOB_FILENAME +_TEST_REQUIREMENTS_FILE = _reasoning_engines._REQUIREMENTS_FILE +_TEST_EXTRA_PACKAGES_FILE = _reasoning_engines._EXTRA_PACKAGES_FILE +_TEST_QUERY_PROMPT = "Find the first fibonacci number greater than 999" +_TEST_REASONING_ENGINE_GCS_URI = "{}/{}/{}".format( + _TEST_STAGING_BUCKET, + 
_TEST_GCS_DIR_NAME, + _TEST_BLOB_FILENAME, +) +_TEST_REASONING_ENGINE_DEPENDENCY_FILES_GCS_URI = "{}/{}/{}".format( + _TEST_STAGING_BUCKET, + _TEST_GCS_DIR_NAME, + _TEST_EXTRA_PACKAGES_FILE, +) +_TEST_REASONING_ENGINE_REQUIREMENTS_GCS_URI = "{}/{}/{}".format( + _TEST_STAGING_BUCKET, + _TEST_GCS_DIR_NAME, + _TEST_REQUIREMENTS_FILE, +) +_TEST_REASONING_ENGINE_REQUIREMENTS = [ + "google-cloud-aiplatform==1.29.0", + "langchain", +] +_TEST_REASONING_ENGINE_EXTRA_PACKAGES = [ + "lib", + "main.py", +] +_TEST_REASONING_ENGINE_QUERY_SCHEMA = _utils.to_proto( + _utils.generate_schema( + CapitalizeEngine().query, + schema_name="CapitalizeEngine_query", + ) +) +_TEST_REASONING_ENGINE_OBJ = types.ReasoningEngine( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + display_name=_TEST_REASONING_ENGINE_DISPLAY_NAME, + spec=types.ReasoningEngineSpec( + package_spec=types.ReasoningEngineSpec.PackageSpec( + python_version=f"{sys.version_info.major}.{sys.version_info.minor}", + pickle_object_gcs_uri=_TEST_REASONING_ENGINE_GCS_URI, + dependency_files_gcs_uri=_TEST_REASONING_ENGINE_DEPENDENCY_FILES_GCS_URI, + requirements_gcs_uri=_TEST_REASONING_ENGINE_REQUIREMENTS_GCS_URI, + ), + ), +) +_TEST_REASONING_ENGINE_OBJ.spec.class_methods.append( + _TEST_REASONING_ENGINE_QUERY_SCHEMA +) +_TEST_REASONING_ENGINE_QUERY_REQUEST = types.QueryReasoningEngineRequest( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + input={"query": _TEST_QUERY_PROMPT}, +) +_TEST_REASONING_ENGINE_QUERY_RESPONSE = {} +_TEST_REASONING_ENGINE_OPERATION_SCHEMAS = [] +_TEST_REASONING_ENGINE_SYS_VERSION = "3.10" + + +@pytest.fixture(scope="module") +def google_auth_mock(): + with mock.patch.object(auth, "default") as google_auth_mock: + google_auth_mock.return_value = ( + auth_credentials.AnonymousCredentials(), + _TEST_PROJECT, + ) + yield google_auth_mock + + +@pytest.fixture(scope="module") +def cloud_storage_get_bucket_mock(): + with mock.patch.object(storage, "Client") as cloud_storage_mock: + bucket_mock = 
mock.Mock(spec=storage.Bucket) + bucket_mock.blob.return_value.open.return_value = "blob_file" + bucket_mock.blob.return_value.upload_from_filename.return_value = None + bucket_mock.blob.return_value.upload_from_string.return_value = None + + cloud_storage_mock.get_bucket.return_value = bucket_mock + + yield cloud_storage_mock + + +@pytest.fixture(scope="module") +def cloud_storage_create_bucket_mock(): + with mock.patch.object(storage, "Client") as cloud_storage_mock: + bucket_mock = mock.Mock(spec=storage.Bucket) + bucket_mock.blob.return_value.open.return_value = "blob_file" + bucket_mock.blob.return_value.upload_from_filename.return_value = None + bucket_mock.blob.return_value.upload_from_string.return_value = None + + cloud_storage_mock.get_bucket = mock.Mock( + side_effect=ValueError("bucket not found") + ) + cloud_storage_mock.bucket.return_value = bucket_mock + cloud_storage_mock.create_bucket.return_value = bucket_mock + + yield cloud_storage_mock + + +@pytest.fixture(scope="module") +def tarfile_open_mock(): + with mock.patch.object(tarfile, "open") as tarfile_open_mock: + tarfile_mock = mock.Mock() + tarfile_mock.add.return_value = None + tarfile_open_mock().__enter__().return_value = tarfile_mock + yield tarfile_open_mock + + +@pytest.fixture(scope="module") +def cloudpickle_dump_mock(): + with mock.patch.object(cloudpickle, "dump") as cloudpickle_dump_mock: + cloudpickle_dump_mock.return_value = None + yield cloudpickle_dump_mock + + +@pytest.fixture(scope="module") +def get_reasoning_engine_mock(): + with mock.patch.object( + reasoning_engine_service.ReasoningEngineServiceClient, + "get_reasoning_engine", + ) as get_reasoning_engine_mock: + api_client_mock = mock.Mock( + spec=reasoning_engine_service.ReasoningEngineServiceClient, + ) + api_client_mock.get_reasoning_engine.return_value = _TEST_REASONING_ENGINE_OBJ + get_reasoning_engine_mock.return_value = api_client_mock + yield get_reasoning_engine_mock + + +@pytest.fixture(scope="module") +def 
create_reasoning_engine_mock(): + with mock.patch.object( + reasoning_engine_service.ReasoningEngineServiceClient, + "create_reasoning_engine", + ) as create_reasoning_engine_mock: + create_reasoning_engine_lro_mock = mock.Mock(ga_operation.Operation) + create_reasoning_engine_lro_mock.result.return_value = ( + _TEST_REASONING_ENGINE_OBJ + ) + create_reasoning_engine_mock.return_value = create_reasoning_engine_lro_mock + yield create_reasoning_engine_mock + + +@pytest.fixture(scope="module") +def delete_reasoning_engine_mock(): + with mock.patch.object( + reasoning_engine_service.ReasoningEngineServiceClient, + "delete_reasoning_engine", + ) as delete_reasoning_engine_mock: + delete_reasoning_engine_lro_mock = mock.Mock(ga_operation.Operation) + delete_reasoning_engine_lro_mock.result.return_value = None + delete_reasoning_engine_mock.return_value = delete_reasoning_engine_lro_mock + yield delete_reasoning_engine_mock + + +@pytest.fixture(scope="module") +def query_reasoning_engine_mock(): + with mock.patch.object( + reasoning_engine_execution_service.ReasoningEngineExecutionServiceClient, + "query_reasoning_engine", + ) as query_reasoning_engine_mock: + api_client_mock = mock.Mock( + spec=reasoning_engine_execution_service.ReasoningEngineExecutionServiceClient, + ) + api_client_mock.query_reasoning_engine.return_value = ( + _TEST_REASONING_ENGINE_QUERY_RESPONSE + ) + query_reasoning_engine_mock.return_value = api_client_mock + yield query_reasoning_engine_mock + + +@pytest.fixture(scope="module") +def to_dict_mock(): + with mock.patch.object(_utils, "to_dict") as to_dict_mock: + to_dict_mock.return_value = {} + yield to_dict_mock + + +class InvalidCapitalizeEngineWithoutQuerySelf: + """A sample Reasoning Engine with an invalid query method.""" + + def set_up(self): + pass + + def query() -> str: + """Runs the engine.""" + return "RESPONSE" + + +@pytest.mark.usefixtures("google_auth_mock") +class TestReasoningEngine: + def setup_method(self): + 
importlib.reload(initializer) + importlib.reload(aiplatform) + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + credentials=_TEST_CREDENTIALS, + staging_bucket=_TEST_STAGING_BUCKET, + ) + self.test_app = CapitalizeEngine() + + def teardown_method(self): + initializer.global_pool.shutdown(wait=True) + + def test_get_reasoning_engine(self, get_reasoning_engine_mock): + reasoning_engines.ReasoningEngine(_TEST_RESOURCE_ID) + get_reasoning_engine_mock.assert_called_with( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + retry=_TEST_RETRY, + ) + + def test_create_reasoning_engine( + self, + create_reasoning_engine_mock, + cloud_storage_create_bucket_mock, + tarfile_open_mock, + cloudpickle_dump_mock, + get_reasoning_engine_mock, + ): + test_reasoning_engine = reasoning_engines.ReasoningEngine.create( + self.test_app, + reasoning_engine_name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + display_name=_TEST_REASONING_ENGINE_DISPLAY_NAME, + requirements=_TEST_REASONING_ENGINE_REQUIREMENTS, + extra_packages=_TEST_REASONING_ENGINE_EXTRA_PACKAGES, + ) + # Manually set _gca_resource here to prevent the mocks from propagating. 
+ test_reasoning_engine._gca_resource = _TEST_REASONING_ENGINE_OBJ + create_reasoning_engine_mock.assert_called_with( + parent=_TEST_PARENT, + reasoning_engine=test_reasoning_engine.gca_resource, + ) + get_reasoning_engine_mock.assert_called_with( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + retry=_TEST_RETRY, + ) + + def test_delete_after_create_reasoning_engine( + self, + create_reasoning_engine_mock, + cloud_storage_get_bucket_mock, + tarfile_open_mock, + cloudpickle_dump_mock, + get_reasoning_engine_mock, + delete_reasoning_engine_mock, + ): + test_reasoning_engine = reasoning_engines.ReasoningEngine.create( + self.test_app, + reasoning_engine_name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + display_name=_TEST_REASONING_ENGINE_DISPLAY_NAME, + requirements=_TEST_REASONING_ENGINE_REQUIREMENTS, + ) + # Manually set _gca_resource here to prevent the mocks from propagating. + test_reasoning_engine._gca_resource = _TEST_REASONING_ENGINE_OBJ + create_reasoning_engine_mock.assert_called_with( + parent=_TEST_PARENT, + reasoning_engine=test_reasoning_engine.gca_resource, + ) + get_reasoning_engine_mock.assert_called_with( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + retry=_TEST_RETRY, + ) + test_reasoning_engine.delete() + delete_reasoning_engine_mock.assert_called_with( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + ) + + def test_delete_after_get_reasoning_engine( + self, + get_reasoning_engine_mock, + delete_reasoning_engine_mock, + ): + test_reasoning_engine = reasoning_engines.ReasoningEngine(_TEST_RESOURCE_ID) + # Manually set _gca_resource here to prevent the mocks from propagating. 
+ test_reasoning_engine._gca_resource = _TEST_REASONING_ENGINE_OBJ + get_reasoning_engine_mock.assert_called_with( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + retry=_TEST_RETRY, + ) + test_reasoning_engine.delete() + delete_reasoning_engine_mock.assert_called_with( + name=test_reasoning_engine.resource_name, + ) + + def test_query_reasoning_engine( + self, + get_reasoning_engine_mock, + query_reasoning_engine_mock, + to_dict_mock, + ): + test_reasoning_engine = reasoning_engines.ReasoningEngine(_TEST_RESOURCE_ID) + # Manually set _gca_resource here to prevent the mocks from propagating. + test_reasoning_engine._gca_resource = _TEST_REASONING_ENGINE_OBJ + get_reasoning_engine_mock.assert_called_with( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + retry=_TEST_RETRY, + ) + test_reasoning_engine.query(query=_TEST_QUERY_PROMPT) + query_reasoning_engine_mock.assert_called_with( + request=_TEST_REASONING_ENGINE_QUERY_REQUEST + ) + to_dict_mock.assert_called_once() + + def test_operation_schemas(self, get_reasoning_engine_mock): + test_reasoning_engine = reasoning_engines.ReasoningEngine(_TEST_RESOURCE_ID) + # Manually set _gca_resource here to prevent the mocks from propagating. 
+ test_reasoning_engine._gca_resource = _TEST_REASONING_ENGINE_OBJ + test_reasoning_engine._operation_schemas = ( + _TEST_REASONING_ENGINE_OPERATION_SCHEMAS + ) + get_reasoning_engine_mock.assert_called_with( + name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + retry=_TEST_RETRY, + ) + assert test_reasoning_engine.operation_schemas() == ( + _TEST_REASONING_ENGINE_OPERATION_SCHEMAS + ) + + +@pytest.mark.usefixtures("google_auth_mock") +class TestReasoningEngineErrors: + def setup_method(self): + importlib.reload(initializer) + importlib.reload(aiplatform) + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + credentials=_TEST_CREDENTIALS, + staging_bucket=_TEST_STAGING_BUCKET, + ) + self.test_app = CapitalizeEngine() + self.invalid_app = InvalidCapitalizeEngineWithoutQuerySelf() + + def test_create_reasoning_engine_unsupported_sys_version( + self, + create_reasoning_engine_mock, + cloud_storage_create_bucket_mock, + tarfile_open_mock, + cloudpickle_dump_mock, + get_reasoning_engine_mock, + ): + with pytest.raises(ValueError, match="Unsupported python version"): + reasoning_engines.ReasoningEngine.create( + self.test_app, + reasoning_engine_name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + display_name=_TEST_REASONING_ENGINE_DISPLAY_NAME, + requirements=_TEST_REASONING_ENGINE_REQUIREMENTS, + extra_packages=_TEST_REASONING_ENGINE_EXTRA_PACKAGES, + sys_version="2.6", + ) + + def test_create_reasoning_engine_with_invalid_query_method( + self, + create_reasoning_engine_mock, + cloud_storage_create_bucket_mock, + tarfile_open_mock, + cloudpickle_dump_mock, + get_reasoning_engine_mock, + ): + with pytest.raises(ValueError, match="Invalid query signature"): + reasoning_engines.ReasoningEngine.create( + self.invalid_app, + reasoning_engine_name=_TEST_REASONING_ENGINE_RESOURCE_NAME, + display_name=_TEST_REASONING_ENGINE_DISPLAY_NAME, + requirements=_TEST_REASONING_ENGINE_REQUIREMENTS, + extra_packages=_TEST_REASONING_ENGINE_EXTRA_PACKAGES, + ) + + +def 
place_tool_query( + city: str, + activity: Optional[str] = None, + page_size: int = 3, +): + """Searches the city for recommendations on the activity.""" + pass + + +def place_photo_query( + photo_reference: str, + maxwidth: int = 400, + maxheight: Optional[int] = None, +): + """Returns the photo for a given reference.""" + pass + + +class TestGenerateSchema(parameterized.TestCase): + @parameterized.named_parameters( + dict( + testcase_name="place_tool_query", + func=place_tool_query, + required=["city", "activity"], + expected_operation={ + "name": "place_tool_query", + "description": ( + "Searches the city for recommendations on the activity." + ), + "parameters": { + "type": "object", + "properties": { + "city": {"type": "string"}, + "activity": {"type": "string", "nullable": True}, + "page_size": {"type": "integer"}, + }, + "required": ["city", "activity"], + }, + }, + ), + dict( + testcase_name="place_photo_query", + func=place_photo_query, + required=["photo_reference"], + expected_operation={ + "name": "place_photo_query", + "description": "Returns the photo for a given reference.", + "parameters": { + "properties": { + "photo_reference": {"type": "string"}, + "maxwidth": {"type": "integer"}, + "maxheight": {"type": "integer", "nullable": True}, + }, + "required": ["photo_reference"], + "type": "object", + }, + }, + ), + ) + def test_generate_schemas(self, func, required, expected_operation): + result = _utils.generate_schema(func, required=required) + self.assertDictEqual(result, expected_operation) diff --git a/tests/unit/vertexai/test_tuning.py b/tests/unit/vertexai/test_tuning.py new file mode 100644 index 0000000000..73506ad417 --- /dev/null +++ b/tests/unit/vertexai/test_tuning.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Unit tests for generative model tuning.""" +# pylint: disable=protected-access,bad-continuation + +import copy +import datetime +from typing import Dict, Iterable +from unittest import mock +import uuid + +import vertexai +from google.cloud.aiplatform import compat +from google.cloud.aiplatform import initializer +from google.cloud.aiplatform import utils as aiplatform_utils +from google.cloud.aiplatform.metadata import experiment_resources +from google.cloud.aiplatform_v1.services import gen_ai_tuning_service +from google.cloud.aiplatform_v1.types import job_state +from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job +from vertexai.preview import tuning +from vertexai.preview.tuning import sft as supervised_tuning + +import pytest + +from unittest.mock import patch + +from google.rpc import status_pb2 + + +_TEST_PROJECT = "test-project" +_TEST_LOCATION = "us-central1" + + +_global_tuning_jobs: Dict[str, gca_tuning_job.TuningJob] = {} + + +class MockGenAiTuningServiceClient(gen_ai_tuning_service.GenAiTuningServiceClient): + @property + def _tuning_jobs(self) -> Dict[str, gca_tuning_job.TuningJob]: + return _global_tuning_jobs + + def create_tuning_job( + self, + *, + parent: str, + tuning_job: gca_tuning_job.TuningJob, + **_, + ) -> gca_tuning_job.TuningJob: + tuning_job = copy.deepcopy(tuning_job) + resource_id = uuid.uuid4().hex + resource_name = f"{parent}/tuningJobs/{resource_id}" + tuning_job.name = resource_name + current_time = datetime.datetime.now(datetime.timezone.utc) + tuning_job.create_time = current_time + 
tuning_job.update_time = current_time + tuning_job.state = job_state.JobState.JOB_STATE_PENDING + self._tuning_jobs[resource_name] = tuning_job + return tuning_job + + def _progress_tuning_job(self, name: str): + tuning_job: gca_tuning_job.TuningJob = self._tuning_jobs[name] + current_time = datetime.datetime.now(datetime.timezone.utc) + if tuning_job.state == job_state.JobState.JOB_STATE_PENDING: + if ( + "invalid_dataset" + in tuning_job.supervised_tuning_spec.training_dataset_uri + ): + tuning_job.state = job_state.JobState.JOB_STATE_FAILED + tuning_job.error = status_pb2.Status( + code=400, message="Invalid dataset." + ) + else: + tuning_job.state = job_state.JobState.JOB_STATE_RUNNING + tuning_job.update_time = current_time + elif tuning_job.state == job_state.JobState.JOB_STATE_RUNNING: + parent = tuning_job.name.partition("/tuningJobs/")[0] + tuning_job.state = job_state.JobState.JOB_STATE_SUCCEEDED + experiment_id = uuid.uuid4().hex + tuned_model_id = uuid.uuid4().hex + tuned_model_endpoint_id = uuid.uuid4().hex + tuning_job.experiment = ( + f"{parent}/metadataStores/default/contexts/{experiment_id}" + ) + tuning_job.tuned_model = gca_tuning_job.TunedModel( + model=f"{parent}/models/{tuned_model_id}", + endpoint=f"{parent}/endpoints/{tuned_model_endpoint_id}", + ) + tuning_job.end_time = current_time + tuning_job.update_time = current_time + else: + pass + + def get_tuning_job(self, *, name: str, **_) -> gca_tuning_job.TuningJob: + tuning_job = self._tuning_jobs[name] + tuning_job = copy.deepcopy(tuning_job) + self._progress_tuning_job(name) + + return tuning_job + + def list_tuning_jobs( + self, *, parent: str, **_ + ) -> Iterable[gca_tuning_job.TuningJob]: + return [ + tuning_job + for name, tuning_job in self._tuning_jobs.items() + if name.startswith(parent + "/") + ] + + def cancel_tuning_job(self, *, name: str, **_) -> None: + tuning_job = self._tuning_jobs[name] + assert tuning_job.state in ( + job_state.JobState.JOB_STATE_RUNNING, + 
job_state.JobState.JOB_STATE_PENDING, + ) + tuning_job.state = job_state.JobState.JOB_STATE_CANCELLED + + +class MockTuningJobClientWithOverride(aiplatform_utils.ClientWithOverride): + _is_temporary = False + _default_version = compat.V1 + _version_map = ( + (compat.V1, MockGenAiTuningServiceClient), + # v1beta1 version does not exist + # (compat.V1BETA1, gen_ai_tuning_service_v1beta1.client.JobServiceClient), + ) + + +@pytest.fixture() +def experiment_init_mock(): + with patch.object(experiment_resources.Experiment, "__init__") as experiment_mock: + experiment_mock.return_value = None + yield experiment_mock + + +@pytest.mark.usefixtures("google_auth_mock", "experiment_init_mock") +class TestgenerativeModelTuning: + """Unit tests for generative model tuning.""" + + def setup_method(self): + vertexai.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + ) + + def teardown_method(self): + initializer.global_pool.shutdown(wait=True) + + @mock.patch.object( + target=tuning.TuningJob, + attribute="client_class", + new=MockTuningJobClientWithOverride, + ) + def test_genai_tuning_service_supervised_tuning_tune_model(self): + sft_tuning_job = supervised_tuning.train( + source_model="gemini-1.0-pro-001", + train_dataset="gs://some-bucket/some_dataset.jsonl", + # Optional: + validation_dataset="gs://some-bucket/some_dataset.jsonl", + epochs=300, + learning_rate_multiplier=1.0, + ) + assert sft_tuning_job.state == job_state.JobState.JOB_STATE_PENDING + assert not sft_tuning_job.has_ended + assert not sft_tuning_job.has_succeeded + + # Refreshing the job + sft_tuning_job.refresh() + assert sft_tuning_job.state == job_state.JobState.JOB_STATE_PENDING + assert not sft_tuning_job.has_ended + assert not sft_tuning_job.has_succeeded + + # Refreshing the job + sft_tuning_job.refresh() + assert sft_tuning_job.state == job_state.JobState.JOB_STATE_RUNNING + assert not sft_tuning_job.has_ended + assert not sft_tuning_job.has_succeeded + + # Refreshing the job + 
sft_tuning_job.refresh() + assert sft_tuning_job.state == job_state.JobState.JOB_STATE_SUCCEEDED + assert sft_tuning_job.has_ended + assert sft_tuning_job.has_succeeded + assert sft_tuning_job._experiment_name + assert sft_tuning_job.tuned_model_name + assert sft_tuning_job.tuned_model_endpoint_name diff --git a/vertexai/extensions/_extensions.py b/vertexai/extensions/_extensions.py new file mode 100644 index 0000000000..8dc44d3ed8 --- /dev/null +++ b/vertexai/extensions/_extensions.py @@ -0,0 +1,321 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import json +from typing import Optional, Sequence, Union + +from google.cloud.aiplatform import base +from google.cloud.aiplatform import initializer +from google.cloud.aiplatform import utils as aip_utils +from google.cloud.aiplatform_v1beta1 import types +from vertexai.reasoning_engines import _utils + +from google.protobuf import struct_pb2 + +_LOGGER = base.Logger(__name__) + +_AuthConfigOrJson = Union[_utils.JsonDict, types.AuthConfig] +_StructOrJson = Union[_utils.JsonDict, struct_pb2.Struct] +_RuntimeConfigOrJson = Union[_utils.JsonDict, types.RuntimeConfig] + + +_VERTEX_EXTENSION_HUB = { + "code_interpreter": { + "display_name": "Code Interpreter", + "description": ( + "This extension generates and executes code in the specified language" + ), + "manifest": { + "name": "code_interpreter_tool", + "description": "Google Code Interpreter Extension", + "api_spec": { + "open_api_gcs_uri": ( + "gs://vertex-extension-public/code_interpreter.yaml" + ), + }, + "auth_config": { + "auth_type": "GOOGLE_SERVICE_ACCOUNT_AUTH", + "google_service_account_config": {}, + }, + }, + }, + "vertex_ai_search": { + "display_name": "Vertex AI Search", + "description": "This extension generates and executes search queries", + "manifest": { + "name": "vertex_ai_search", + "description": "Vertex AI Search Extension", + "api_spec": { + "open_api_gcs_uri": ( + "gs://vertex-extension-public/vertex_ai_search.yaml" + ), + }, + "auth_config": { + "auth_type": "GOOGLE_SERVICE_ACCOUNT_AUTH", + "google_service_account_config": {}, + }, + }, + }, +} + + +class Extension(base.VertexAiResourceNounWithFutureManager): + """Represents a Vertex AI Extension resource.""" + + client_class = aip_utils.ExtensionRegistryClientWithOverride + _resource_noun = "extension" + _getter_method = "get_extension" + _list_method = "list_extensions" + _delete_method = "delete_extension" + _parse_resource_name_method = "parse_extension_path" + _format_resource_name_method = "extension_path" + + def __init__(self, 
extension_name: str): + """Retrieves an extension resource. + + Args: + extension_name (str): + Required. A fully-qualified resource name or ID such as + "projects/123/locations/us-central1/extensions/456" or + "456" when project and location are initialized or passed. + """ + super().__init__(resource_name=extension_name) + self.execution_api_client = initializer.global_config.create_client( + client_class=aip_utils.ExtensionExecutionClientWithOverride, + ) + self._gca_resource = self._get_gca_resource( + resource_name=extension_name + ) + self._api_spec = None + self._operation_schemas = None + + @classmethod + def create( + cls, + manifest: Union[_utils.JsonDict, types.ExtensionManifest], + *, + extension_name: Optional[str] = None, + display_name: Optional[str] = None, + description: Optional[str] = None, + runtime_config: Optional[_RuntimeConfigOrJson] = None, + ): + """Creates a new Extension. + + Args: + manifest (Union[dict[str, Any], ExtensionManifest]): + Required. The manifest for the Extension to be created. + extension_name (str): + Optional. A fully-qualified extension resource name or extension + ID such as "projects/123/locations/us-central1/extensions/456" or + "456" when project and location are initialized or passed. If + specifying the extension ID, it should be 4-63 characters, valid + characters are lowercase letters, numbers and hyphens ("-"), + and it should start with a number or a lower-case letter. If not + provided, Vertex AI will generate a value for this ID. + display_name (str): + Optional. The user-defined name of the Extension. + The name can be up to 128 characters long and can comprise any + UTF-8 character. + description (str): + Optional. The description of the Extension. + runtime_config (Union[dict[str, Any], RuntimeConfig]): + Optional. Runtime config controlling the runtime behavior of + this Extension. Defaults to None. + + Returns: + Extension: The extension that was created. 
+ """ + sdk_resource = cls.__new__(cls) + base.VertexAiResourceNounWithFutureManager.__init__( + sdk_resource, + resource_name=extension_name, + ) + extension = types.Extension( + name=extension_name, + display_name=display_name or cls._generate_display_name(), + description=description, + manifest=_utils.to_proto(manifest, types.ExtensionManifest()), + ) + if runtime_config: + extension.runtime_config = _utils.to_proto( + runtime_config, types.RuntimeConfig(), + ) + operation_future = sdk_resource.api_client.import_extension( + parent=initializer.global_config.common_location_path(), + extension=extension, + ) + _LOGGER.log_create_with_lro(cls, operation_future) + created_extension = operation_future.result() + _LOGGER.log_create_complete( + cls, + created_extension, + cls._resource_noun, + module_name="vertexai.preview.extensions", + ) + # We use `._get_gca_resource(...)` instead of `created_extension` to + # fully instantiate the attributes of the extension. + sdk_resource._gca_resource = sdk_resource._get_gca_resource( + resource_name=created_extension.name + ) + sdk_resource.execution_api_client = ( + initializer.global_config.create_client( + client_class=aip_utils.ExtensionExecutionClientWithOverride, + ) + ) + sdk_resource._api_spec = None + sdk_resource._operation_schemas = None + return sdk_resource + + @property + def resource_name(self) -> str: + """Full qualified resource name for the extension.""" + return self._gca_resource.name + + def api_spec(self) -> _utils.JsonDict: + """Returns the (Open)API Spec of the extension.""" + if self._api_spec is None: + self._api_spec = _load_api_spec( + self._gca_resource.manifest.api_spec + ) + return self._api_spec + + def operation_schemas(self) -> Sequence[_utils.JsonDict]: + """Returns the (Open)API schemas for each operation of the extension.""" + if self._operation_schemas is None: + self._operation_schemas = [ + _utils.to_dict(op.function_declaration) + for op in self._gca_resource.extension_operations + ] + 
return self._operation_schemas + + def execute( + self, + operation_id: str, + operation_params: Optional[_StructOrJson] = None, + runtime_auth_config: Optional[_AuthConfigOrJson] = None, + ) -> Union[_utils.JsonDict, str]: + """Executes an operation of the extension with the specified params. + + Args: + operation_id (str): + Required. The ID of the operation to be executed. + operation_params (Union[dict[str, Any], Struct]): + Optional. Parameters used for executing the operation. It should + be in a form of map with param name as the key and actual param + value as the value. E.g. if this operation requires a param + "name" to be set to "abc", you can set this to {"name": "abc"}. + Defaults to an empty dictionary. + runtime_auth_config (Union[dict[str, Any], AuthConfig]): + Optional. The Auth configuration to execute the operation. + + Returns: + The result of executing the extension operation. + """ + request = types.ExecuteExtensionRequest( + name=self.resource_name, + operation_id=operation_id, + operation_params=operation_params, + ) + if runtime_auth_config: + request.runtime_auth_config = _utils.to_proto( + runtime_auth_config, types.AuthConfig(), + ) + response = self.execution_api_client.execute_extension(request) + return _try_parse_execution_response(response) + + @classmethod + def from_hub( + cls, + name: str, + *, + runtime_config: Optional[_RuntimeConfigOrJson] = None, + ): + """Creates a new Extension from the set of first party extensions. + + Args: + name (str): + Required. The name of the extension in the hub to be created. + Supported values are "code_interpreter" and "vertex_ai_search". + runtime_config (Union[dict[str, Any], RuntimeConfig]): + Optional. Runtime config controlling the runtime behavior of + the Extension. Defaults to None. + + Returns: + Extension: The extension that was created. + + Raises: + ValueError: If the `name` is not supported in the hub. 
+ ValueError: If the `runtime_config` is specified but inconsistent + with the name (e.g. the name was "code_interpreter" but the + runtime_config was based on "vertex_ai_search_runtime_config"). + """ + if runtime_config: + runtime_config = _utils.to_proto( + runtime_config, types.RuntimeConfig(), + ) + if name == "code_interpreter": + if runtime_config and not getattr( + runtime_config, + "code_interpreter_runtime_config", + None, + ): + raise ValueError( + "code_interpreter_runtime_config is required for " + "code_interpreter extension" + ) + elif name == "vertex_ai_search": + if not runtime_config: + raise ValueError( + "runtime_config is required for vertex_ai_search extension" + ) + if runtime_config and not getattr( + runtime_config, + "vertex_ai_search_runtime_config", + None, + ): + raise ValueError( + "vertex_ai_search_runtime_config is required for " + "vertex_ai_search extension" + ) + else: + raise ValueError(f"Unsupported 1P extension name: {name}") + extension_info = _VERTEX_EXTENSION_HUB[name] + return cls.create( + display_name=extension_info["display_name"], + description=extension_info["description"], + manifest=extension_info["manifest"], + runtime_config=runtime_config, + ) + + +def _try_parse_execution_response( + response: types.ExecuteExtensionResponse + ) -> Union[_utils.JsonDict, str]: + content: str = response.content + try: + content = json.loads(response.content) + except: + pass + return content + + +def _load_api_spec(api_spec) -> _utils.JsonDict: + """Loads the (Open)API Spec of the extension and converts it to JSON.""" + if api_spec.open_api_yaml: + yaml = aip_utils.yaml_utils._maybe_import_yaml() + return yaml.safe_load(api_spec.open_api_yaml) + elif api_spec.open_api_gcs_uri: + return aip_utils.yaml_utils.load_yaml(api_spec.open_api_gcs_uri) + return {} diff --git a/vertexai/generative_models/README.md b/vertexai/generative_models/README.md index aa02f500ca..941a287e80 100644 --- a/vertexai/generative_models/README.md +++ 
b/vertexai/generative_models/README.md @@ -63,6 +63,19 @@ print(vision_chat.send_message(["I like this image.", image])) print(vision_chat.send_message("What things do I like?.")) ``` +#### System instructions +``` +from vertexai.generative_models import GenerativeModel +model = GenerativeModel( + "gemini-1.0-pro", + system_instruction=[ + "Talk like a pirate.", + "Don't use rude words.", + ], +) +print(model.generate_content("Why is sky blue?")) +``` + #### Function calling ``` diff --git a/vertexai/generative_models/__init__.py b/vertexai/generative_models/__init__.py index e391040984..0a4458e4bd 100644 --- a/vertexai/generative_models/__init__.py +++ b/vertexai/generative_models/__init__.py @@ -30,6 +30,7 @@ Image, Part, ResponseValidationError, + SafetySetting, Tool, ) @@ -47,5 +48,6 @@ "Image", "Part", "ResponseValidationError", + "SafetySetting", "Tool", ] diff --git a/vertexai/generative_models/_generative_models.py b/vertexai/generative_models/_generative_models.py index 90f3995168..1e7b0f7191 100644 --- a/vertexai/generative_models/_generative_models.py +++ b/vertexai/generative_models/_generative_models.py @@ -32,6 +32,7 @@ ) from google.cloud.aiplatform import initializer as aiplatform_initializer +from google.cloud.aiplatform import utils as aiplatform_utils from google.cloud.aiplatform_v1beta1 import types as aiplatform_types from google.cloud.aiplatform_v1beta1.services import prediction_service from google.cloud.aiplatform_v1beta1.types import ( @@ -86,7 +87,7 @@ ] SafetySettingsType = Union[ - List[gapic_content_types.SafetySetting], + List["SafetySetting"], Dict[ gapic_content_types.HarmCategory, gapic_content_types.SafetySetting.HarmBlockThreshold, @@ -133,6 +134,8 @@ def __init__( generation_config: Optional[GenerationConfigType] = None, safety_settings: Optional[SafetySettingsType] = None, tools: Optional[List["Tool"]] = None, + tool_config: Optional["ToolConfig"] = None, + system_instruction: Optional[PartsType] = None, ): r"""Initializes 
GenerativeModel. @@ -144,10 +147,17 @@ def __init__( Args: model_name: Model Garden model resource name. + Alternatively, a tuned model endpoint resource name can be provided. generation_config: Default generation config to use in generate_content. safety_settings: Default safety settings to use in generate_content. tools: Default tools to use in generate_content. + tool_config: Default tool config to use in generate_content. + system_instruction: Default system instruction to use in generate_content. + Note: Only text should be used in parts. + Content of each part will become a separate paragraph. """ + if not model_name: + raise ValueError("model_name must not be empty") if "/" not in model_name: model_name = "publishers/google/models/" + model_name if model_name.startswith("models/"): @@ -156,13 +166,29 @@ def __init__( project = aiplatform_initializer.global_config.project location = aiplatform_initializer.global_config.location + if model_name.startswith("publishers/"): + prediction_resource_name = ( + f"projects/{project}/locations/{location}/{model_name}" + ) + elif model_name.startswith("projects/"): + prediction_resource_name = model_name + else: + raise ValueError( + "model_name must be either a Model Garden model ID or a full resource name." 
+ ) + + location = aiplatform_utils.extract_project_and_location_from_parent( + prediction_resource_name + )["location"] + self._model_name = model_name - self._prediction_resource_name = ( - f"projects/{project}/locations/{location}/{model_name}" - ) + self._prediction_resource_name = prediction_resource_name + self._location = location self._generation_config = generation_config self._safety_settings = safety_settings self._tools = tools + self._tool_config = tool_config + self._system_instruction = system_instruction # Validating the parameters self._prepare_request( @@ -170,6 +196,8 @@ def __init__( generation_config=generation_config, safety_settings=safety_settings, tools=tools, + tool_config=tool_config, + system_instruction=system_instruction, ) @property @@ -179,6 +207,7 @@ def _prediction_client(self) -> prediction_service.PredictionServiceClient: self._prediction_client_value = ( aiplatform_initializer.global_config.create_client( client_class=prediction_service.PredictionServiceClient, + location_override=self._location, prediction_client=True, ) ) @@ -193,6 +222,7 @@ def _prediction_async_client( self._prediction_async_client_value = ( aiplatform_initializer.global_config.create_client( client_class=prediction_service.PredictionServiceAsyncClient, + location_override=self._location, prediction_client=True, ) ) @@ -205,6 +235,8 @@ def _prepare_request( generation_config: Optional[GenerationConfigType] = None, safety_settings: Optional[SafetySettingsType] = None, tools: Optional[List["Tool"]] = None, + tool_config: Optional["ToolConfig"] = None, + system_instruction: Optional[PartsType] = None, ) -> gapic_prediction_service_types.GenerateContentRequest: """Prepares a GAPIC GenerateContentRequest.""" if not contents: @@ -213,6 +245,8 @@ def _prepare_request( generation_config = generation_config or self._generation_config safety_settings = safety_settings or self._safety_settings tools = tools or self._tools + tool_config = tool_config or 
self._tool_config + system_instruction = system_instruction or self._system_instruction # contents can either be a list of Content objects (most generic case) if isinstance(contents, Sequence) and any( @@ -244,6 +278,10 @@ def _prepare_request( else: contents = [_to_content(contents)] + gapic_system_instruction: Optional[gapic_content_types.Content] = None + if system_instruction: + gapic_system_instruction = _to_content(system_instruction) + gapic_generation_config: Optional[gapic_content_types.GenerationConfig] = None if generation_config: if isinstance(generation_config, gapic_content_types.GenerationConfig): @@ -258,17 +296,20 @@ def _prepare_request( raise TypeError( "generation_config must either be a GenerationConfig object or a dictionary representation of it." ) + gapic_safety_settings = None if safety_settings: if isinstance(safety_settings, Sequence): - if not all( - isinstance(safety_setting, gapic_content_types.SafetySetting) - for safety_setting in safety_settings - ): - raise TypeError( - "When passing a list with SafetySettings objects, every item in a list must be a SafetySetting object." - ) - gapic_safety_settings = safety_settings + gapic_safety_settings = [] + for safety_setting in safety_settings: + if isinstance(safety_setting, gapic_content_types.SafetySetting): + gapic_safety_settings.append(safety_setting) + elif isinstance(safety_setting, SafetySetting): + gapic_safety_settings.append(safety_setting._raw_safety_setting) + else: + raise TypeError( + "When passing a list with SafetySettings objects, every item in a list must be a SafetySetting object." + ) elif isinstance(safety_settings, dict): gapic_safety_settings = [ gapic_content_types.SafetySetting( @@ -283,6 +324,7 @@ def _prepare_request( raise TypeError( "safety_settings must either be a list of SafetySettings objects or a dictionary mapping from HarmCategory to HarmBlockThreshold." 
) + gapic_tools = None if tools: gapic_tools = [] @@ -294,6 +336,13 @@ def _prepare_request( else: raise TypeError(f"Unexpected tool type: {tool}.") + gapic_tool_config = None + if tool_config: + if isinstance(tool_config, ToolConfig): + gapic_tool_config = tool_config._gapic_tool_config + else: + raise TypeError("tool_config must be a ToolConfig object.") + return gapic_prediction_service_types.GenerateContentRequest( # The `model` parameter now needs to be set for the vision models. # Always need to pass the resource via the `model` parameter. @@ -303,6 +352,8 @@ def _prepare_request( generation_config=gapic_generation_config, safety_settings=gapic_safety_settings, tools=gapic_tools, + tool_config=gapic_tool_config, + system_instruction=gapic_system_instruction, ) def _parse_response( @@ -318,6 +369,7 @@ def generate_content( generation_config: Optional[GenerationConfigType] = None, safety_settings: Optional[SafetySettingsType] = None, tools: Optional[List["Tool"]] = None, + tool_config: Optional["ToolConfig"] = None, stream: bool = False, ) -> Union["GenerationResponse", Iterable["GenerationResponse"],]: """Generates content. @@ -333,6 +385,7 @@ def generate_content( generation_config: Parameters for the generation. safety_settings: Safety settings as a mapping from HarmCategory to HarmBlockThreshold. tools: A list of tools (functions) that the model can try calling. + tool_config: Config shared for all tools provided in the request. stream: Whether to stream the response. 
Returns: @@ -346,6 +399,7 @@ def generate_content( generation_config=generation_config, safety_settings=safety_settings, tools=tools, + tool_config=tool_config, ) else: return self._generate_content( @@ -353,6 +407,7 @@ def generate_content( generation_config=generation_config, safety_settings=safety_settings, tools=tools, + tool_config=tool_config, ) async def generate_content_async( @@ -362,6 +417,7 @@ async def generate_content_async( generation_config: Optional[GenerationConfigType] = None, safety_settings: Optional[SafetySettingsType] = None, tools: Optional[List["Tool"]] = None, + tool_config: Optional["ToolConfig"] = None, stream: bool = False, ) -> Union["GenerationResponse", AsyncIterable["GenerationResponse"],]: """Generates content asynchronously. @@ -377,6 +433,7 @@ async def generate_content_async( generation_config: Parameters for the generation. safety_settings: Safety settings as a mapping from HarmCategory to HarmBlockThreshold. tools: A list of tools (functions) that the model can try calling. + tool_config: Config shared for all tools provided in the request. stream: Whether to stream the response. Returns: @@ -389,6 +446,7 @@ async def generate_content_async( generation_config=generation_config, safety_settings=safety_settings, tools=tools, + tool_config=tool_config, ) else: return await self._generate_content_async( @@ -396,6 +454,7 @@ async def generate_content_async( generation_config=generation_config, safety_settings=safety_settings, tools=tools, + tool_config=tool_config, ) def _generate_content( @@ -405,6 +464,7 @@ def _generate_content( generation_config: Optional[GenerationConfigType] = None, safety_settings: Optional[SafetySettingsType] = None, tools: Optional[List["Tool"]] = None, + tool_config: Optional["ToolConfig"] = None, ) -> "GenerationResponse": """Generates content. @@ -419,6 +479,7 @@ def _generate_content( generation_config: Parameters for the generation. 
safety_settings: Safety settings as a mapping from HarmCategory to HarmBlockThreshold. tools: A list of tools (functions) that the model can try calling. + tool_config: Config shared for all tools provided in the request. Returns: A single GenerationResponse object @@ -428,6 +489,7 @@ def _generate_content( generation_config=generation_config, safety_settings=safety_settings, tools=tools, + tool_config=tool_config, ) gapic_response = self._prediction_client.generate_content(request=request) return self._parse_response(gapic_response) @@ -439,6 +501,7 @@ async def _generate_content_async( generation_config: Optional[GenerationConfigType] = None, safety_settings: Optional[SafetySettingsType] = None, tools: Optional[List["Tool"]] = None, + tool_config: Optional["ToolConfig"] = None, ) -> "GenerationResponse": """Generates content asynchronously. @@ -453,6 +516,7 @@ async def _generate_content_async( generation_config: Parameters for the generation. safety_settings: Safety settings as a mapping from HarmCategory to HarmBlockThreshold. tools: A list of tools (functions) that the model can try calling. + tool_config: Config shared for all tools provided in the request. Returns: An awaitable for a single GenerationResponse object @@ -462,6 +526,7 @@ async def _generate_content_async( generation_config=generation_config, safety_settings=safety_settings, tools=tools, + tool_config=tool_config, ) gapic_response = await self._prediction_async_client.generate_content( request=request @@ -475,6 +540,7 @@ def _generate_content_streaming( generation_config: Optional[GenerationConfigType] = None, safety_settings: Optional[SafetySettingsType] = None, tools: Optional[List["Tool"]] = None, + tool_config: Optional["ToolConfig"] = None, ) -> Iterable["GenerationResponse"]: """Generates content. @@ -489,6 +555,7 @@ def _generate_content_streaming( generation_config: Parameters for the generation. safety_settings: Safety settings as a mapping from HarmCategory to HarmBlockThreshold. 
tools: A list of tools (functions) that the model can try calling. + tool_config: Config shared for all tools provided in the request. Yields: A stream of GenerationResponse objects @@ -498,6 +565,7 @@ def _generate_content_streaming( generation_config=generation_config, safety_settings=safety_settings, tools=tools, + tool_config=tool_config, ) response_stream = self._prediction_client.stream_generate_content( request=request @@ -512,6 +580,7 @@ async def _generate_content_streaming_async( generation_config: Optional[GenerationConfigType] = None, safety_settings: Optional[SafetySettingsType] = None, tools: Optional[List["Tool"]] = None, + tool_config: Optional["ToolConfig"] = None, ) -> AsyncIterable["GenerationResponse"]: """Generates content asynchronously. @@ -526,6 +595,7 @@ async def _generate_content_streaming_async( generation_config: Parameters for the generation. safety_settings: Safety settings as a mapping from HarmCategory to HarmBlockThreshold. tools: A list of tools (functions) that the model can try calling. + tool_config: Config shared for all tools provided in the request. Returns: An awaitable for a stream of GenerationResponse objects @@ -535,6 +605,7 @@ async def _generate_content_streaming_async( generation_config=generation_config, safety_settings=safety_settings, tools=tools, + tool_config=tool_config, ) response_stream = await self._prediction_async_client.stream_generate_content( request=request @@ -963,10 +1034,6 @@ def _send_message_streaming( full_response = None for chunk in stream: chunks.append(chunk) - if full_response: - _append_response(full_response, chunk) - else: - full_response = chunk # By default we're not adding incomplete interactions to history. 
if self._response_validator is not None: self._response_validator( @@ -974,6 +1041,10 @@ def _send_message_streaming( request_contents=request_history, response_chunks=chunks, ) + if full_response: + _append_response(full_response, chunk) + else: + full_response = chunk yield chunk if not full_response: return @@ -1030,10 +1101,6 @@ async def async_generator(): full_response = None async for chunk in stream: chunks.append(chunk) - if full_response: - _append_response(full_response, chunk) - else: - full_response = chunk # By default we're not adding incomplete interactions to history. if self._response_validator is not None: self._response_validator( @@ -1041,7 +1108,10 @@ async def async_generator(): request_contents=request_history, response_chunks=chunks, ) - + if full_response: + _append_response(full_response, chunk) + else: + full_response = chunk yield chunk if not full_response: return @@ -1289,6 +1359,77 @@ def __repr__(self) -> str: return self._raw_tool.__repr__() +class ToolConfig: + r"""Config shared for all tools provided in the request. + + Usage: + Create ToolConfig + ``` + tool_config = ToolConfig( + function_calling_config=ToolConfig.FunctionCallingConfig( + mode=ToolConfig.FunctionCallingConfig.Mode.ANY, + allowed_function_names=["get_current_weather_func"], + )) + ``` + Use ToolConfig in `GenerativeModel.generate_content`: + ``` + model = GenerativeModel("gemini-pro") + print(model.generate_content( + "What is the weather like in Boston?", + # You can specify tools when creating a model to avoid having to send them with every request. + tools=[weather_tool], + tool_config=tool_config, + )) + ``` + Use ToolConfig in chat: + ``` + model = GenerativeModel( + "gemini-pro", + # You can specify tools when creating a model to avoid having to send them with every request. 
+ tools=[weather_tool], + tool_config=tool_config, + ) + chat = model.start_chat() + print(chat.send_message("What is the weather like in Boston?")) + print(chat.send_message( + Part.from_function_response( + name="get_current_weather", + response={ + "content": {"weather_there": "super nice"}, + } + ), + )) + ``` + """ + + class FunctionCallingConfig: + Mode = gapic_tool_types.FunctionCallingConfig.Mode + + def __init__( + self, + mode: "ToolConfig.FunctionCallingConfig.Mode", + allowed_function_names: Optional[List[str]] = None, + ): + """Constructs FunctionCallingConfig. + + Args: + mode: Enum describing the function calling mode + allowed_function_names: A list of allowed function names + (must match from Tool). Only set when the Mode is ANY. + """ + self._gapic_function_calling_config = ( + gapic_tool_types.FunctionCallingConfig( + mode=mode, + allowed_function_names=allowed_function_names, + ) + ) + + def __init__(self, function_calling_config: "ToolConfig.FunctionCallingConfig"): + self._gapic_tool_config = gapic_tool_types.ToolConfig( + function_calling_config=function_calling_config._gapic_function_calling_config + ) + + class FunctionDeclaration: r"""A representation of a function declaration. @@ -1431,9 +1572,13 @@ def from_func(cls, func: Callable[..., Any]) -> "CallableFunctionDeclaration": Returns: CallableFunctionDeclaration. """ - from vertexai.generative_models import _function_calling_utils + from vertexai.generative_models import ( + _function_calling_utils, + ) - function_schema = _function_calling_utils.generate_json_schema_from_function(func) + function_schema = _function_calling_utils.generate_json_schema_from_function( + func + ) # Getting out the description first since it will be removed from the schema. 
function_description = function_schema["description"] function_schema = ( @@ -1738,6 +1883,59 @@ def _image(self) -> "Image": return Image.from_bytes(data=self._raw_part.inline_data.data) +class SafetySetting: + """Parameters for the generation.""" + + HarmCategory = gapic_content_types.HarmCategory + HarmBlockMethod = gapic_content_types.SafetySetting.HarmBlockMethod + HarmBlockThreshold = gapic_content_types.SafetySetting.HarmBlockThreshold + + def __init__( + self, + *, + category: "SafetySetting.HarmCategory", + threshold: "SafetySetting.HarmBlockThreshold", + method: Optional["SafetySetting.HarmBlockMethod"] = None, + ): + r"""Safety settings. + + Args: + category: Harm category. + threshold: The harm block threshold. + method: Specify if the threshold is used for probability or severity + score. If not specified, the threshold is used for probability + score. + """ + self._raw_safety_setting = gapic_content_types.SafetySetting( + category=category, + threshold=threshold, + method=method, + ) + + @classmethod + def _from_gapic( + cls, + raw_safety_setting: gapic_content_types.SafetySetting, + ) -> "SafetySetting": + response = cls( + category=raw_safety_setting.category, + threshold=raw_safety_setting.threshold, + ) + response._raw_safety_setting = raw_safety_setting + return response + + @classmethod + def from_dict(cls, safety_setting_dict: Dict[str, Any]) -> "SafetySetting": + raw_safety_setting = gapic_content_types.SafetySetting(safety_setting_dict) + return cls._from_gapic(raw_safety_setting=raw_safety_setting) + + def to_dict(self) -> Dict[str, Any]: + return type(self._raw_safety_setting).to_dict(self._raw_safety_setting) + + def __repr__(self): + return self._raw_safety_setting.__repr__() + + class grounding: # pylint: disable=invalid-name """Grounding namespace.""" diff --git a/vertexai/language_models/_language_models.py b/vertexai/language_models/_language_models.py index d2c514337b..3637e8c7fe 100644 --- 
a/vertexai/language_models/_language_models.py +++ b/vertexai/language_models/_language_models.py @@ -3448,6 +3448,7 @@ class _PreviewCodeGenerationModel(CodeGenerationModel, _CountTokensCodeGeneratio _SUPPORTED_RLHF_MODELS = { "text-bison@001", "chat-bison@001", + "text-bison@002", } diff --git a/vertexai/preview/evaluation/__init__.py b/vertexai/preview/evaluation/__init__.py new file mode 100644 index 0000000000..67895b4377 --- /dev/null +++ b/vertexai/preview/evaluation/__init__.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +"""Rapid GenAI Evaluation Module.""" + +from vertexai.preview.evaluation import _base +from vertexai.preview.evaluation import _eval_tasks +from vertexai.preview.evaluation import metrics +from vertexai.preview.evaluation import prompt_template + + +EvalResult = _base.EvalResult +EvalTask = _eval_tasks.EvalTask +CustomMetric = metrics.CustomMetric +make_metric = metrics.make_metric +PromptTemplate = prompt_template.PromptTemplate + +__all__ = [ + "CustomMetric", + "EvalResult", + "EvalTask", + "make_metric", + "PromptTemplate", +] diff --git a/vertexai/preview/evaluation/_base.py b/vertexai/preview/evaluation/_base.py new file mode 100644 index 0000000000..588e1e6eac --- /dev/null +++ b/vertexai/preview/evaluation/_base.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Base classes for evaluation.""" + + +import dataclasses +from typing import Dict, List, Optional, Union, TYPE_CHECKING + +from google.cloud.aiplatform_v1beta1.services import ( + evaluation_service as gapic_evaluation_services, +) +from vertexai.preview.evaluation.metrics import ( + _base as metrics_base, +) + +if TYPE_CHECKING: + import pandas as pd + + +@dataclasses.dataclass +class EvaluationRunConfig: + """Evaluation Run Configurations. + + Attributes: + dataset: The dataset to evaluate. + metrics: The list of metric names to evaluate, or a metrics bundle for an + evaluation task, or custom metric instances. 
+ column_map: The dictionary of column name overrides in the dataset. + client: The asynchronous evaluation client. + """ + + dataset: "pd.DataFrame" + metrics: List[Union[str, metrics_base.CustomMetric]] + column_map: Dict[str, str] + client: gapic_evaluation_services.EvaluationServiceAsyncClient + + def validate_dataset_column(self, column_name: str) -> None: + """Validates that the column names in the column map are in the dataset. + + Args: + column_name: The column name to validate. + + Raises: + KeyError: If any of the column names are not in the dataset. + """ + if self.column_map.get(column_name, column_name) not in self.dataset.columns: + raise KeyError( + f"Required column `{self.column_map.get(column_name, column_name)}`" + " not found in the eval dataset. The columns in the provided dataset" + f" are {self.dataset.columns}." + ) + + +@dataclasses.dataclass +class EvalResult: + """Evaluation result. + + Attributes: + summary_metrics: The summary evaluation metrics for an evaluation run. + metrics_table: A table containing eval inputs, ground truth, and metrics per + row. + """ + + summary_metrics: Dict[str, float] + metrics_table: Optional["pd.DataFrame"] = None diff --git a/vertexai/preview/evaluation/_eval_tasks.py b/vertexai/preview/evaluation/_eval_tasks.py new file mode 100644 index 0000000000..cf86d12710 --- /dev/null +++ b/vertexai/preview/evaluation/_eval_tasks.py @@ -0,0 +1,411 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, Callable, Dict, List, Literal, Optional, TYPE_CHECKING, Union +import uuid + +from google.api_core import exceptions +import vertexai +from google.cloud.aiplatform import base +from google.cloud.aiplatform.metadata import metadata +from vertexai import generative_models +from vertexai.preview.evaluation import _base as eval_base +from vertexai.preview.evaluation import _evaluation +from vertexai.preview.evaluation import utils +from vertexai.preview.evaluation.metrics import ( + _base as metrics_base, +) + +if TYPE_CHECKING: + import pandas as pd + +# pylint: disable=g-import-not-at-top +try: + from IPython import display as IPython_display +except ImportError: + IPython_display = None + +_LOGGER = base.Logger(__name__) + +EvalResult = eval_base.EvalResult +GenerativeModel = generative_models.GenerativeModel + + +class EvalTask: + """A class representing an EvalTask. + + An Evaluation Tasks is defined to measure the model's ability to perform a + certain task in response to specific prompts or inputs. Evaluation tasks must + contain an evaluation dataset, and a list of metrics to evaluate. Evaluation + tasks help developers compare propmpt templates, track experiments, compare + models and their settings, and assess the quality of the model's generated + text. + + Dataset details: + Default dataset column names: + * content_column_name: "content" + * reference_column_name: "reference" + * response_column_name: "response" + Requirement for different use cases: + * Bring your own prediction: A `response` column is required. Response + column name can be customized by providing `response_column_name` + parameter. + * Without prompt template: A column representing the input prompt to the + model is required. If `content_column_name` is not specified, the + eval dataset requires `content` column by default. 
The response + column is not used if present and new responses from the model are + generated with the content column and used for evaluation. + * With prompt template: Dataset must contain column names corresponding to + the placeholder names in the prompt template. For example, if prompt + template is "Instruction: {instruction}, context: {context}", the + dataset must contain `instruction` and `context` column. + + Metrics Details: + The supported metrics, metric bundle descriptions, grading rubrics, and + the required input fields can be found on the Vertex AI public + documentation. + + Usage: + 1. To perform bring your own prediction evaluation, provide the model + responses in the response column in the dataset. The response column name + is "response" by default, or specify `response_column_name` parameter to + customize. + + ``` + eval_dataset = pd.DataFrame({ + "reference": [...], + "response" : [...], + }) + eval_task = EvalTask( + dataset=eval_dataset, + metrics=["bleu", "rouge_l_sum", "coherence", "fluency"], + experiment="my-experiment", + ) + eval_result = eval_task.evaluate( + experiment_run_name="eval-experiment-run" + ) + ``` + + 2. To perform evaluation with built-in Gemini model inference, specify the + `model` parameter with a GenerativeModel instance. The default query + column name to the model is `content`. + + ``` + eval_dataset = pd.DataFrame({ + "reference": [...], + "content" : [...], + }) + result = EvalTask( + dataset=eval_dataset, + metrics=["exact_match", "bleu", "rouge_1", "rouge_2", + "rouge_l_sum"], + experiment="my-experiment", + ).evaluate( + model=GenerativeModel("gemini-pro"), + experiment_run_name="gemini-pro-eval-run" + ) + ``` + + 3. If a `prompt_template` is specified, the `content` column is not required. + Prompts can be assembled from the evaluation dataset, and all placeholder + names must be present in the dataset columns. 
+ ``` + eval_dataset = pd.DataFrame({ + "context" : [...], + "instruction": [...], + "reference" : [...], + }) + result = EvalTask( + dataset=eval_dataset, + metrics=["summarization_quality"], + ).evaluate( + model=model, + prompt_template="{instruction}. Article: {context}. Summary:", + ) + ``` + + 4. To perform evaluation with custom model inference, specify the `model` + parameter with a custom prediction function. The `content` column in the + dataset is used to generate predictions with the custom model function for + evaluation. + + ``` + def custom_model_fn(input: str) -> str: + response = client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[ + {"role": "user", "content": input} + ] + ) + return response.choices[0].message.content + + eval_dataset = pd.DataFrame({ + "content" : [...], + "reference": [...], + }) + result = EvalTask( + dataset=eval_dataset, + metrics=["text_generation_similarity","text_generation_quality"], + experiment="my-experiment", + ).evaluate( + model=custom_model_fn, + experiment_run_name="gpt-eval-run" + ) + ``` + """ + + _resource_noun = "evalTasks" + + def __init__( + self, + *, + dataset: Union["pd.DataFrame", str, Dict[str, Any]], + metrics: List[ + Union[ + Literal[ + "exact_match", + "bleu", + "rouge_1", + "rouge_2", + "rouge_l", + "rouge_l_sum", + "coherence", + "fluency", + "safety", + "groundedness", + "fulfillment", + "summarization_quality", + "summarization_helpfulness", + "summarization_verbosity", + "question_answering_quality", + "question_answering_relevance", + "question_answering_helpfulness", + "question_answering_correctness", + "text_generation_similarity", + "text_generation_quality", + "text_generation_instruction_following", + "text_generation_safety", + "text_generation_factuality", + "summarization_pointwise_reference_free", + "qa_pointwise_reference_free", + "qa_pointwise_reference_based", + "tool_call_quality", + ], + metrics_base.CustomMetric, + ] + ], + experiment: Optional[str] = None, + 
content_column_name: str = "content", + reference_column_name: str = "reference", + response_column_name: str = "response", + ): + """Initializes an EvalTask. + + Args: + dataset: The dataset to be evaluated. + Supports the following dataset formats: + * pandas.DataFrame: Used directly for evaluation. + * Dict: Converted to a pandas DataFrame before evaluation. + * str: Interpreted as a file path or URI. Supported formats include: + * Local JSONL or CSV files: Loaded from the local filesystem. + * GCS JSONL or CSV files: Loaded from Google Cloud Storage + (e.g., 'gs://bucket/data.csv'). + * BigQuery table URI: Loaded from Google Cloud BigQuery + (e.g., 'bq://project-id.dataset.table_name'). + metrics: The list of metrics names to be evaluated, or a metrics + bundle for an evaluation task, or custom metric instances. + experiment: The name of the experiment to log the evaluations to. + content_column_name: The column name of content in the dataset to send to + the model. If not set, default to `content`. + reference_column_name: The column name of ground truth in the dataset. If + not set, default to `reference`. + response_column_name: The column name of model response in the dataset. If + not set, default to `response`. + """ + self.dataset = utils.load_dataset(dataset) + self.metrics = metrics + self.experiment = experiment + self.content_column_name = content_column_name + self.reference_column_name = reference_column_name + self.response_column_name = response_column_name + + def _evaluate_with_experiment( + self, + model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None, + prompt_template: Optional[str] = None, + experiment_run_name: Optional[str] = None, + response_column_name: str = "response", + ) -> EvalResult: + """Runs an evaluation for the EvalTask with an experiment. + + Args: + model: A GenerativeModel instance or a custom model function to generate + responses to evaluate. 
If not provided, the evaluation is computed with + the `response` column in the `dataset`. + prompt_template: The prompt template to use for the evaluation. If not + set, the prompt template that was used to create the EvalTask will be + used. + experiment_run_name: The name of the experiment run to log the evaluation + to if an experiment is set for this EvalTask. If not provided, a random + unique experiment run name is used. + response_column_name: The column name of model response in the dataset. If + not set, default to `response`. + + Returns: + The evaluation result. + """ + self._validate_experiment_run() + with vertexai.preview.start_run(experiment_run_name): + self._log_eval_experiment_param(model, prompt_template) + eval_result = _evaluation.evaluate( + dataset=self.dataset, + metrics=self.metrics, + model=model, + prompt_template=prompt_template, + content_column_name=self.content_column_name, + reference_column_name=self.reference_column_name, + response_column_name=response_column_name or self.response_column_name, + ) + try: + vertexai.preview.log_metrics(eval_result.summary_metrics) + except (ValueError, TypeError, exceptions.InvalidArgument) as e: + _LOGGER.warning(f"Experiment metrics logging failed: {str(e)}") + return eval_result + + def evaluate( + self, + *, + model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None, + prompt_template: Optional[str] = None, + experiment_run_name: Optional[str] = None, + response_column_name: str = "response", + ) -> EvalResult: + """Runs an evaluation for the EvalTask. + + Args: + model: A GenerativeModel instance or a custom model function to generate + responses to evaluate. If not provided, the evaluation is computed with + the `response` column in the `dataset`. + prompt_template: The prompt template to use for the evaluation. If not + set, the prompt template that was used to create the EvalTask will be + used. 
+ experiment_run_name: The name of the experiment run to log the evaluation + to if an experiment is set for this EvalTask. If not provided, a random + unique experiment run name is used. + response_column_name: The column name of model response in the dataset. If + not set, default to `response`. + + Returns: + The evaluation result. + """ + global_experiment_name = metadata._experiment_tracker.experiment_name + if experiment_run_name and not self.experiment and not global_experiment_name: + raise ValueError( + "Experiment is not set. Please initialize EvalTask with an" + " experiment, or initialize a global experiment with " + "`vertexai.init(experiment='experiment_name')`for logging this" + " evaluation run." + ) + + experiment_run_name = experiment_run_name or f"{uuid.uuid4()}" + + if self.experiment and global_experiment_name: + metadata._experiment_tracker.set_experiment( + experiment=self.experiment, backing_tensorboard=False + ) + eval_result = self._evaluate_with_experiment( + model, prompt_template, experiment_run_name, response_column_name + ) + metadata._experiment_tracker.set_experiment( + experiment=global_experiment_name, backing_tensorboard=False + ) + elif self.experiment and not global_experiment_name: + metadata._experiment_tracker.set_experiment( + experiment=self.experiment, backing_tensorboard=False + ) + eval_result = self._evaluate_with_experiment( + model, prompt_template, experiment_run_name, response_column_name + ) + metadata._experiment_tracker.reset() + elif not self.experiment and global_experiment_name: + eval_result = self._evaluate_with_experiment( + model, prompt_template, experiment_run_name, response_column_name + ) + else: + eval_result = _evaluation.evaluate( + dataset=self.dataset, + metrics=self.metrics, + model=model, + prompt_template=prompt_template, + content_column_name=self.content_column_name, + reference_column_name=self.reference_column_name, + response_column_name=response_column_name or self.response_column_name, 
+ ) + return eval_result + + def _validate_experiment_run(self) -> None: + """Checks if an experiment run already exists.""" + if metadata._experiment_tracker.experiment_run: + raise ValueError( + "Experiment run already exists. Please specify the name of the" + " experiment run to assign current session with in this evaluate" + " method." + ) + + def _log_eval_experiment_param( + self, + model: Optional[Union[GenerativeModel, Callable[[str], str]]] = None, + prompt_template: Optional[str] = None, + ) -> None: + """Logs variable input parameters of an evaluation to an experiment run.""" + model_metadata = {} + + if prompt_template is not None: + model_metadata.update({"prompt_template": prompt_template}) + + if isinstance(model, GenerativeModel): + model_metadata.update( + { + "model_name": model._model_name, + } + ) + + if model._generation_config and isinstance(model._generation_config, dict): + # TODO(b/311221071): support logging GenerationConfig type. + model_metadata.update(**model._generation_config) + + if model._safety_settings and isinstance(model._safety_settings, dict): + # TODO(b/311221071): support logging List[SafetySetting] type. 
+ safety_settings = model._safety_settings + safety_settings_as_str = { + category.name: threshold.name + for category, threshold in safety_settings.items() + } + model_metadata.update(safety_settings_as_str) + + if model_metadata: + _LOGGER.info(f"Logging Rapid Eval experiment metadata: {model_metadata}") + try: + vertexai.preview.log_params(model_metadata) + except (ValueError, TypeError) as e: + _LOGGER.warning(f"Experiment metadata logging failed: {str(e)}") + + def display_runs(self): + """Displays experiment runs associated with this EvalTask.""" + if not self.experiment: + raise ValueError("Experiment is not set.") + elif IPython_display: + IPython_display.display(vertexai.preview.get_experiment_df(self.experiment)) diff --git a/vertexai/preview/evaluation/_evaluation.py b/vertexai/preview/evaluation/_evaluation.py new file mode 100644 index 0000000000..1c24664060 --- /dev/null +++ b/vertexai/preview/evaluation/_evaluation.py @@ -0,0 +1,567 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import asyncio +import collections +from typing import Any, Dict, List, Optional, Set, TYPE_CHECKING, Tuple, Union, Callable + +from google.cloud.aiplatform import base +from google.cloud.aiplatform_v1beta1.types import ( + content as gapic_content_types, +) +from vertexai import generative_models +from vertexai.preview.evaluation import _base as evaluation_base +from vertexai.preview.evaluation import constants +from vertexai.preview.evaluation import ( + prompt_template as prompt_template_base, +) +from vertexai.preview.evaluation import utils +from vertexai.preview.evaluation.metrics import ( + _base as metrics_base, +) +from vertexai.preview.evaluation.metrics import ( + _instance_evaluation, +) + + +if TYPE_CHECKING: + import pandas as pd + +_LOGGER = base.Logger(__name__) +_METRICS_BUNDLE_TO_METRIC_NAMES = { + constants.MetricBundle.TEXT_GENERATION_SIMILARITY: ( + constants.Metric.EXACT_MATCH, + constants.Metric.BLEU, + constants.Metric.ROUGE_1, + constants.Metric.ROUGE_2, + constants.Metric.ROUGE_L, + constants.Metric.ROUGE_L_SUM, + ), + constants.MetricBundle.TEXT_GENERATION_QUALITY: ( + constants.Metric.COHERENCE, + constants.Metric.FLUENCY, + ), + constants.MetricBundle.TOOL_CALL_QUALITY: ( + constants.Metric.TOOL_CALL_VALID, + constants.Metric.TOOL_NAME_MATCH, + constants.Metric.TOOL_PARAMETER_KEY_MATCH, + constants.Metric.TOOL_PARAMETER_KV_MATCH, + ), + constants.MetricBundle.TEXT_GENERATION_INSTRUCTION_FOLLOWING: ( + constants.Metric.FULFILLMENT, + ), + constants.MetricBundle.TEXT_GENERATION_SAFETY: (constants.Metric.SAFETY,), + constants.MetricBundle.TEXT_GENERATION_FACTUALITY: (constants.Metric.GROUNDEDNESS,), + constants.MetricBundle.SUMMARIZATION_POINTWISE_REFERENCE_FREE: ( + constants.Metric.SUMMARIZATION_QUALITY, + constants.Metric.SUMMARIZATION_HELPFULNESS, + constants.Metric.SUMMARIZATION_VERBOSITY, + ), + constants.MetricBundle.QA_POINTWISE_REFERENCE_FREE: ( + constants.Metric.QUESTION_ANSWERING_QUALITY, + 
constants.Metric.QUESTION_ANSWERING_RELEVANCE, + constants.Metric.QUESTION_ANSWERING_HELPFULNESS, + ), + constants.MetricBundle.QA_POINTWISE_REFERENCE_BASED: ( + constants.Metric.QUESTION_ANSWERING_CORRECTNESS, + ), +} +_SUCCESSFUL_FINISH_REASONS = [ + gapic_content_types.Candidate.FinishReason.STOP, + # Many responses have this finish reason + gapic_content_types.Candidate.FinishReason.FINISH_REASON_UNSPECIFIED, +] + + +def _replace_metric_bundle_with_metrics( + metrics_list: List[Union[str, metrics_base.CustomMetric]], +) -> List[str]: + """Replaces metric bundles with corresponding metrics. + + Args: + metrics_list: The original list containing metrics bundle names. + + Returns: + The modified metrics list containing only metric names. + """ + modified_list = [] + + for item in metrics_list: + if item in _METRICS_BUNDLE_TO_METRIC_NAMES.keys(): + modified_list.extend(_METRICS_BUNDLE_TO_METRIC_NAMES[item]) + else: + modified_list.append(item) + + return modified_list + + +def _compute_custom_metrics( + row_dict: Dict[str, Any], + custom_metrics: List[metrics_base.CustomMetric], +) -> Dict[str, Any]: + """Computes custom metrics for a row. + + Args: + row_dict: A dictionary of an instance in the eval dataset. + custom_metrics: A list of CustomMetrics. + + Returns: + A dictionary of an instance containing custom metric results. + + Raises: + KeyError: If the custom metric function does not return a valid output. + """ + for custom_metric in custom_metrics: + metric_output = custom_metric.metric_function(row_dict) + if custom_metric.name in metric_output: + row_dict[custom_metric.name] = metric_output[custom_metric.name] + else: + raise KeyError( + f"Custom metric score `{custom_metric.name}` not found in the metric" + f" output {metric_output}. Please make sure the custom metric" + " function is valid, and the output dictionary uses" + f" `{custom_metric.name}` as the key for metric value." + ) + # Include additional metric results like explanation. 
+ for key, value in metric_output.items(): + if key != custom_metric.name: + row_dict[f"{custom_metric.name}/{key}"] = value + return row_dict + + +def _separate_custom_metrics( + metrics: List[str], +) -> Tuple[List[str], List[metrics_base.CustomMetric],]: + """Separates the metrics list into API and custom metrics.""" + custom_metrics = [] + api_metrics = [] + for metric in metrics: + if isinstance(metric, metrics_base.CustomMetric): + custom_metrics.append(metric) + else: + api_metrics.append(metric) + return api_metrics, custom_metrics + + +def _compute_summary_metrics( + evaluation_run_config: evaluation_base.EvaluationRunConfig, + metrics_table: "pd.DataFrame", +) -> Dict[str, Any]: + """Computes summary metrics. + + Args: + evaluation_run_config: Evaluation Run Configurations. + metrics_table: A dataframe containing per-instance metrics results. + + Returns: + A dictionary containing summary metrics results and statistics. + """ + summary_metrics = {} + summary_metrics[constants.MetricResult.ROW_COUNT_KEY] = metrics_table.shape[0] + for metric in evaluation_run_config.metrics: + try: + # TODO(b/325078638): implement additional aggregate methods. + summary_metrics[f"{str(metric)}/mean"] = metrics_table.loc[ + :, str(metric) + ].mean() + summary_metrics[f"{str(metric)}/std"] = metrics_table.loc[ + :, str(metric) + ].std() + except (ValueError, KeyError): + _LOGGER.warning( + f"Failed to compute metric statistics for {metric}. This metric" + " output contains error from the Autorater." + ) + continue + return summary_metrics + + +def _generate_response_from_gemini( + model: generative_models.GenerativeModel, prompt: str +) -> str: + """Generates response from Gemini model. + + Args: + model: The Gemini model instance. + prompt: The prompt to send to the model. + + Returns: + The response from the model. + + Raises: + RuntimeError if the prompt or the response for the prompt is blocked for + safety reasons. 
+ """ + response = model.generate_content(prompt) + try: + if not response.candidates: + raise RuntimeError( + f"The model response was blocked due to {response._raw_response.prompt_feedback.block_reason.name}.\n" + f"Blocke reason message: {response._raw_response.prompt_feedback.block_reason_message}.\n" + "The input prompt may be blocked for safety reasons.", + f"Prompt: {prompt}.", + ) + else: + candidate = response.candidates[0] + if candidate.finish_reason not in _SUCCESSFUL_FINISH_REASONS: + raise RuntimeError( + "The model response did not completed successfully.\n" + f"Finish reason: {candidate.finish_reason}.\n" + f"Finish message: {candidate.finish_message}.\n" + f"Safety ratings: {candidate.safety_ratings}.\n" + "Please adjsut the model safety_settings, or try a different prompt." + ) + return response.candidates[0].content.parts[0].text + except Exception: + raise RuntimeError( + "Failed to generate response candidates from Gemini model.\n" + f"Response: {response}.\n" + f"Prompt: {prompt}." + ) + + +def _generate_response_from_gemini_model( + model: generative_models.GenerativeModel, + evaluation_run_config: evaluation_base.EvaluationRunConfig, +) -> None: + """Generates responses from Gemini model. + + Args: + model: The Gemini model instance. + evaluation_run_config: Evaluation Run Configurations. 
+ """ + if ( + constants.Dataset.COMPLETED_PROMPT_COLUMN + in evaluation_run_config.dataset.columns + ): + evaluation_run_config.dataset[ + constants.Dataset.MODEL_RESPONSE_COLUMN + ] = evaluation_run_config.dataset[ + constants.Dataset.COMPLETED_PROMPT_COLUMN + ].apply( + lambda x: _generate_response_from_gemini(model, x) + ) + else: + evaluation_run_config.dataset[ + constants.Dataset.MODEL_RESPONSE_COLUMN + ] = evaluation_run_config.dataset[ + evaluation_run_config.column_map[constants.Dataset.CONTENT_COLUMN] + ].apply( + lambda x: _generate_response_from_gemini(model, x) + ) + + +def _generate_response_from_custom_model_fn( + model_fn: Callable[[str], str], + evaluation_run_config: evaluation_base.EvaluationRunConfig, +) -> None: + """Generates responses from a custom model function. + + Args: + model_fn: The custom model function. + evaluation_run_config: Evaluation Run Configurations. + """ + try: + if ( + constants.Dataset.COMPLETED_PROMPT_COLUMN + in evaluation_run_config.dataset.columns + ): + evaluation_run_config.dataset[ + constants.Dataset.MODEL_RESPONSE_COLUMN + ] = evaluation_run_config.dataset[ + constants.Dataset.COMPLETED_PROMPT_COLUMN + ].apply( + model_fn + ) + else: + evaluation_run_config.dataset[ + constants.Dataset.MODEL_RESPONSE_COLUMN + ] = evaluation_run_config.dataset[ + evaluation_run_config.column_map[constants.Dataset.CONTENT_COLUMN] + ].apply( + model_fn + ) + except (ValueError, IndexError) as e: + _LOGGER.warning(f"Failed to generate response from model function: {e}") + + +def _check_placeholder_columns_exist( + dataset: "pd.DataFrame", placeholder_names_set: Set[str] +) -> None: + """Checks if all placeholder names exist in the dataset columns. + + Args: + dataset: The dataset to evaluate. + placeholder_names_set: A set of placeholder names. + + Raises: + ValueError: If any placeholder names do not exist in the dataset columns + or the prompt template is invalid. 
+ """ + actual_column_names_set = set(dataset.columns) + if not placeholder_names_set.issubset(actual_column_names_set): + missing_columns = placeholder_names_set - actual_column_names_set + raise ValueError( + "Failed to complete prompt template: The following column(s) are" + f" missing: {', '.join(missing_columns)}" + ) + + +def _complete_prompt_for_dataset( + evaluation_run_config: evaluation_base.EvaluationRunConfig, prompt_template: str +) -> None: + """Adds a column in dataset for completed prompts from placeholder columns. + + Args: + evaluation_run_config: Evaluation Run Configurations. + prompt_template: A prompt template string with placeholders that can be + formatted with dataset columns. + + Returns: + The completed prompt template string to send to the model. + + Raises: + ValueError: If any placeholder names do not exist in the dataset columns + or the prompt template is invalid. + """ + prompt_template = prompt_template_base.PromptTemplate(prompt_template) + _check_placeholder_columns_exist( + evaluation_run_config.dataset, prompt_template.placeholders + ) + + try: + evaluation_run_config.dataset[ + constants.Dataset.COMPLETED_PROMPT_COLUMN + ] = evaluation_run_config.dataset.apply( + lambda row: str( + prompt_template.assemble( + **row[list(prompt_template.placeholders)].astype(str).to_dict(), + ) + ), + axis=1, + ) + except Exception as e: + raise ValueError(f"Failed to complete prompt: {e}") from e + + +def _parse_metric_results_to_dataframe( + instance_df: "pd.DataFrame", results: Dict[str, Any] +) -> Dict[str, Any]: + """Parses metric results to a pandas dataframe. + + Args: + instance_df: A dataframe containing per-instance metrics results. + results: A dictionary containing metric results. + + Returns: + A dataframe containing per-instance metrics results. Each metric result + can contain metric score, explanation, and confidence. + """ + try: + import pandas as pd + except ImportError: + raise ImportError( + 'Pandas is not installed. 
Please install the SDK using "pip install' + ' google-cloud-aiplatform[rapid_evaluation]"' + ) + metrics_table = pd.DataFrame(dict(zip(instance_df.columns, instance_df.values.T))) + + for metric_name, metric_results in results.items(): + scores = [ + result.get(constants.MetricResult.SCORE_KEY) for result in metric_results + ] + if ( + metric_name + in constants.Metric.MODEL_BASED_METRIC_LIST + + constants.Metric.PAIRWISE_METRIC_LIST + ): + explanations = [ + result.get(constants.MetricResult.EXPLANATION_KEY) + for result in metric_results + ] + confidences = [ + result.get(constants.MetricResult.CONFIDENCE_KEY) + for result in metric_results + ] + metrics_table[ + f"{metric_name}/{constants.MetricResult.EXPLANATION_KEY}" + ] = explanations + metrics_table[ + f"{metric_name}/{constants.MetricResult.CONFIDENCE_KEY}" + ] = confidences + + metrics_table[metric_name] = scores + + return metrics_table + + +async def _compute_metrics( + evaluation_run_config: evaluation_base.EvaluationRunConfig, +) -> Tuple[Dict[str, Any], "pd.DataFrame"]: + """Computes the metrics for the dataset. + + Args: + evaluation_run_config: Evaluation Run Configurations. + + Returns: + The evaluation results for the input metrics. + + Raises: + RuntimeError: The number of responses does not match the number of metrics. + """ + try: + import pandas as pd + except ImportError: + raise ImportError( + 'Pandas is not installed. 
Please install the SDK using "pip install' + ' google-cloud-aiplatform[rapid_evaluation]"' + ) + + api_metrics, custom_metrics = _separate_custom_metrics( + evaluation_run_config.metrics + ) + instance_list = [] + tasks_by_metric = collections.defaultdict(list) + for _, row in evaluation_run_config.dataset.iterrows(): + row_dict = _compute_custom_metrics(row.to_dict(), custom_metrics) + + instance_list.append(row_dict) + + for metric_name in api_metrics: + task = asyncio.create_task( + _instance_evaluation.evaluate_instances_async( + client=evaluation_run_config.client, + request=_instance_evaluation.build_request( + metric_name=metric_name, + row_dict=row_dict, + evaluation_run_config=evaluation_run_config, + ), + ) + ) + tasks_by_metric[metric_name].append(task) + + results_dict = { + metric_name: await asyncio.gather(*tasks) + for metric_name, tasks in tasks_by_metric.items() + } + + instance_df = pd.DataFrame.from_dict(instance_list) + metrics_table = _parse_metric_results_to_dataframe(instance_df, results_dict) + + summary_metrics = _compute_summary_metrics(evaluation_run_config, metrics_table) + return summary_metrics, metrics_table + + +def evaluate( + dataset: "pd.DataFrame", + metrics: List[Union[str, metrics_base.CustomMetric]], + *, + model: Optional[ + Union[generative_models.GenerativeModel, Callable[[str], str]] + ] = None, + prompt_template: Optional[str] = None, + content_column_name: str = "content", + reference_column_name: str = "reference", + response_column_name: str = "response", + context_column_name: str = "context", + instruction_column_name: str = "instruction", +) -> evaluation_base.EvalResult: + """Runs the evaluation for metrics. + + Args: + dataset: The dataset to evaluate. + metrics: The list of metrics names to evaluate, or a metrics bundle for an + evaluation task, or custom metric instances. + model: The GenerativeModel instance or a custom model function to generate + responses to evaluate. 
If not provided, the evaluation is computed with + the `response` column in the `dataset`. + prompt_template: A prompt template string compatible with `PromptTemplate` + class with placeholders that can be formatted with dataset columns to + create completed prompts. The placeholders can be represented in curly + braces `{placeholder}`, and must be included in the dataset columns if + specified. The placeholder names cannot contain spaces. + content_column_name: The column name of content in the dataset to send to + the model. If not set, default to `content`. + reference_column_name: The column name of ground truth in the dataset. If + not set, default to `reference`. + response_column_name: The column name of model response in the dataset. If + not set, default to `response`. + context_column_name: The column name of summary context in the dataset. If + not set, default to `context`. + instruction_column_name: The column name of the instruction prompt in the + dataset. If not set, default to `instruction`. + + Returns: + EvalResult with summary metrics and a metrics table for per-instance + metrics. 
+ """ + + if not metrics: + raise ValueError("Metrics cannot be empty.") + + evaluation_run_config = evaluation_base.EvaluationRunConfig( + dataset=dataset, + metrics=_replace_metric_bundle_with_metrics(metrics), + column_map={ + constants.Dataset.CONTENT_COLUMN: content_column_name, + constants.Dataset.REFERENCE_COLUMN: reference_column_name, + constants.Dataset.MODEL_RESPONSE_COLUMN: response_column_name, + constants.Dataset.CONTEXT_COLUMN: context_column_name, + constants.Dataset.INSTRUCTION_COLUMN: instruction_column_name, + }, + client=utils.create_evaluation_service_async_client(), + ) + + if prompt_template: + _complete_prompt_for_dataset(evaluation_run_config, prompt_template) + + if model: + if prompt_template: + evaluation_run_config.validate_dataset_column( + constants.Dataset.COMPLETED_PROMPT_COLUMN + ) + else: + evaluation_run_config.validate_dataset_column( + constants.Dataset.CONTENT_COLUMN + ) + + if isinstance(model, generative_models.GenerativeModel): + _generate_response_from_gemini_model(model, evaluation_run_config) + elif callable(model): + _generate_response_from_custom_model_fn(model, evaluation_run_config) + else: + evaluation_run_config.validate_dataset_column( + constants.Dataset.MODEL_RESPONSE_COLUMN + ) + if set(evaluation_run_config.metrics).intersection( + set(constants.Metric.AUTOMATIC_METRIC_LIST) + ): + evaluation_run_config.validate_dataset_column( + constants.Dataset.REFERENCE_COLUMN + ) + + if asyncio.get_event_loop().is_running(): + asyncio.set_event_loop(asyncio.new_event_loop()) + loop = asyncio.get_event_loop() + + summary_metrics, metrics_table = loop.run_until_complete( + _compute_metrics(evaluation_run_config) + ) + + return evaluation_base.EvalResult( + summary_metrics=summary_metrics, metrics_table=metrics_table + ) diff --git a/vertexai/preview/evaluation/constants.py b/vertexai/preview/evaluation/constants.py new file mode 100644 index 0000000000..d25bdd7655 --- /dev/null +++ b/vertexai/preview/evaluation/constants.py 
@@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Constants for evaluation.""" +import dataclasses + + +@dataclasses.dataclass(frozen=True) +class Metric: + """Namespace for Metrics.""" + + # Automatic Metrics. + EXACT_MATCH = "exact_match" + BLEU = "bleu" + ROUGE_1 = "rouge_1" + ROUGE_2 = "rouge_2" + ROUGE_L = "rouge_l" + ROUGE_L_SUM = "rouge_l_sum" + TOOL_CALL_VALID = "tool_call_valid" + TOOL_NAME_MATCH = "tool_name_match" + TOOL_PARAMETER_KEY_MATCH = "tool_parameter_key_match" + TOOL_PARAMETER_KV_MATCH = "tool_parameter_kv_match" + # Model-based Pointwise Metrics. + COHERENCE = "coherence" + FLUENCY = "fluency" + SAFETY = "safety" + GROUNDEDNESS = "groundedness" + FULFILLMENT = "fulfillment" + RESPONSE_RECALL = "response_recall" + SUMMARIZATION_QUALITY = "summarization_quality" + SUMMARIZATION_HELPFULNESS = "summarization_helpfulness" + SUMMARIZATION_VERBOSITY = "summarization_verbosity" + QUESTION_ANSWERING_QUALITY = "question_answering_quality" + QUESTION_ANSWERING_RELEVANCE = "question_answering_relevance" + QUESTION_ANSWERING_HELPFULNESS = "question_answering_helpfulness" + QUESTION_ANSWERING_CORRECTNESS = "question_answering_correctness" + RAG_CONTEXT_RECALL = "rag_context_recall" + # Side-by-side(SxS) Pairwise Metrics. 
+ PAIRWISE_SUMMARIZATION_QUALITY = "pairwise_summarization_quality" + PAIRWISE_QUESTION_ANSWERING_QUALITY = "pairwise_question_answering_quality" + + AUTOMATIC_METRIC_LIST = ( + EXACT_MATCH, + BLEU, + ROUGE_1, + ROUGE_2, + ROUGE_L, + ROUGE_L_SUM, + TOOL_CALL_VALID, + TOOL_NAME_MATCH, + TOOL_PARAMETER_KEY_MATCH, + TOOL_PARAMETER_KV_MATCH, + ) + MODEL_BASED_METRIC_LIST = ( + COHERENCE, + FLUENCY, + SAFETY, + GROUNDEDNESS, + FULFILLMENT, + RESPONSE_RECALL, + SUMMARIZATION_QUALITY, + SUMMARIZATION_HELPFULNESS, + SUMMARIZATION_VERBOSITY, + QUESTION_ANSWERING_QUALITY, + QUESTION_ANSWERING_RELEVANCE, + QUESTION_ANSWERING_HELPFULNESS, + QUESTION_ANSWERING_CORRECTNESS, + RAG_CONTEXT_RECALL, + ) + PAIRWISE_METRIC_LIST = ( + PAIRWISE_SUMMARIZATION_QUALITY, + PAIRWISE_QUESTION_ANSWERING_QUALITY, + ) + + +@dataclasses.dataclass(frozen=True) +class MetricResult: + ROW_COUNT_KEY = "row_count" + SCORE_KEY = "score" + EXPLANATION_KEY = "explanation" + CONFIDENCE_KEY = "confidence" + PAIRWISE_CHOICE_KEY = "pairwise_choice" + + # Automatic Metrics. + EXACT_MATCH_RESULTS = "exact_match_results" + BLEU_RESULTS = "bleu_results" + ROUGE_RESULTS = "rouge_results" + TOOL_CALL_VALID_RESULTS = "tool_call_valid_results" + TOOL_NAME_MATCH_RESULTS = "tool_name_match_results" + TOOL_PARAMETER_KEY_MATCH_RESULTS = "tool_parameter_key_match_results" + TOOL_PARAMETER_KV_MATCH_RESULTS = "tool_parameter_kv_match_results" + # Model-based Pointwise Metrics. 
+ COHERENCE_RESULT = "coherence_result" + FLUENCY_RESULT = "fluency_result" + SAFETY_RESULT = "safety_result" + GROUNDEDNESS_RESULT = "groundedness_result" + FULFILLMENT_RESULT = "fulfillment_result" + RESPONSE_RECALL_RESULT = "response_recall_result" + SUMMARIZATION_QUALITY_RESULT = "summarization_quality_result" + SUMMARIZATION_HELPFULNESS_RESULT = "summarization_helpfulness_result" + SUMMARIZATION_VERBOSITY_RESULT = "summarization_verbosity_result" + QUESTION_ANSWERING_QUALITY_RESULT = "question_answering_quality_result" + QUESTION_ANSWERING_RELEVANCE_RESULT = "question_answering_relevance_result" + QUESTION_ANSWERING_HELPFULNESS_RESULT = "question_answering_helpfulness_result" + QUESTION_ANSWERING_CORRECTNESS_RESULT = "question_answering_correctness_result" + RAG_CONTEXT_RECALL_RESULT = "rag_context_recall_result" + # Side-by-side(SxS) Pairwise Metrics. + PAIRWISE_SUMMARIZATION_QUALITY_RESULT = "pairwise_summarization_quality_result" + PAIRWISE_QUESTION_ANSWERING_QUALITY_RESULT = ( + "pairwise_question_answering_quality_result" + ) + + AUTOMATIC_METRIC_RESULTS_LIST = ( + EXACT_MATCH_RESULTS, + BLEU_RESULTS, + ROUGE_RESULTS, + TOOL_CALL_VALID_RESULTS, + TOOL_NAME_MATCH_RESULTS, + TOOL_PARAMETER_KEY_MATCH_RESULTS, + TOOL_PARAMETER_KV_MATCH_RESULTS, + ) + MODEL_BASED_METRIC_RESULT_LIST = ( + COHERENCE_RESULT, + FLUENCY_RESULT, + SAFETY_RESULT, + GROUNDEDNESS_RESULT, + FULFILLMENT_RESULT, + RESPONSE_RECALL_RESULT, + SUMMARIZATION_QUALITY_RESULT, + SUMMARIZATION_HELPFULNESS_RESULT, + SUMMARIZATION_VERBOSITY_RESULT, + QUESTION_ANSWERING_QUALITY_RESULT, + QUESTION_ANSWERING_RELEVANCE_RESULT, + QUESTION_ANSWERING_HELPFULNESS_RESULT, + QUESTION_ANSWERING_CORRECTNESS_RESULT, + RAG_CONTEXT_RECALL_RESULT, + ) + PAIRWISE_METRIC_RESULT_LIST = ( + PAIRWISE_SUMMARIZATION_QUALITY_RESULT, + PAIRWISE_QUESTION_ANSWERING_QUALITY_RESULT, + ) + + +@dataclasses.dataclass(frozen=True) +class MetricBundle: + """Namespace for MetricBundle.""" + + TEXT_GENERATION_SIMILARITY = 
"text_generation_similarity" + TEXT_GENERATION_QUALITY = "text_generation_quality" + TOOL_CALL_QUALITY = "tool_call_quality" + TEXT_GENERATION_INSTRUCTION_FOLLOWING = "text_generation_instruction_following" + TEXT_GENERATION_SAFETY = "text_generation_safety" + TEXT_GENERATION_FACTUALITY = "text_generation_factuality" + SUMMARIZATION_POINTWISE_REFERENCE_FREE = "summarization_pointwise_reference_free" + QA_POINTWISE_REFERENCE_FREE = "qa_pointwise_reference_free" + QA_POINTWISE_REFERENCE_BASED = "qa_pointwise_reference_based" + + +@dataclasses.dataclass(frozen=True) +class Dataset: + COMPLETED_PROMPT_COLUMN = "completed_prompt" + MODEL_RESPONSE_COLUMN = "response" + BASELINE_MODEL_RESPONSE_COLUMN = "baseline_model_response" + CONTEXT_COLUMN = "context" + REFERENCE_COLUMN = "reference" + CONTENT_COLUMN = "content" + INSTRUCTION_COLUMN = "instruction" diff --git a/vertexai/preview/evaluation/metrics/__init__.py b/vertexai/preview/evaluation/metrics/__init__.py new file mode 100644 index 0000000000..94d768a030 --- /dev/null +++ b/vertexai/preview/evaluation/metrics/__init__.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +"""Evaluation Metrics Module.""" + +from vertexai.preview.evaluation.metrics import ( + _base, +) + +CustomMetric = _base.CustomMetric +make_metric = _base.make_metric + +__all__ = [ + "CustomMetric", + "make_metric", +] diff --git a/vertexai/preview/evaluation/metrics/_base.py b/vertexai/preview/evaluation/metrics/_base.py new file mode 100644 index 0000000000..35ab69aec5 --- /dev/null +++ b/vertexai/preview/evaluation/metrics/_base.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Any, Callable, Dict + + +class CustomMetric: + """The custom evaluation metric. + + Attributes: + name: The name of the metric. + metric_function: The evaluation function. Must use the dataset row/instance + as the metric_function input. Returns per-instance metric result as a + dictionary. The metric score must mapped to the CustomMetric.name as key. + """ + + def __init__( + self, + name: str, + metric_function: Callable[ + [Dict[str, Any]], + Dict[str, Any], + ], + ): + """Initializes the evaluation metric.""" + self.name = name + self.metric_function = metric_function + + def __str__(self): + return self.name + + +def make_metric( + name: str, metric_function: Callable[[Dict[str, Any]], Dict[str, Any]] +) -> CustomMetric: + """Makes a custom metric. + + Args: + name: The name of the metric + metric_function: The evaluation function. 
Must use the dataset row/instance + as the metric_function input. Returns per-instance metric result as a + dictionary. The metric score must be mapped to the CustomMetric.name as key. + + Returns: + A CustomMetric instance, can be passed to evaluate() function. + """ + return CustomMetric(name, metric_function) diff --git a/vertexai/preview/evaluation/metrics/_instance_evaluation.py b/vertexai/preview/evaluation/metrics/_instance_evaluation.py new file mode 100644 index 0000000000..dc7a8ddf2b --- /dev/null +++ b/vertexai/preview/evaluation/metrics/_instance_evaluation.py @@ -0,0 +1,603 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Library for Metrics Computation with Evaluation Service Async Client.""" + +from typing import Any, Dict + +from google import api_core +from google.cloud.aiplatform import base +from google.cloud.aiplatform import initializer +from google.cloud.aiplatform_v1beta1.services import ( + evaluation_service as gapic_evaluation_services, +) +from google.cloud.aiplatform_v1beta1.types import ( + evaluation_service as gapic_evaluation_service_types, +) +from vertexai.preview.evaluation import ( + _base as eval_base, +) +from vertexai.preview.evaluation import constants + +from google.protobuf import json_format + +_LOGGER = base.Logger(__name__) +_METRIC_NAME_TO_METRIC_SPEC = { + # Automatic Metrics. 
+ constants.Metric.EXACT_MATCH: (gapic_evaluation_service_types.ExactMatchSpec()), + constants.Metric.BLEU: gapic_evaluation_service_types.BleuSpec(), + constants.Metric.ROUGE_1: gapic_evaluation_service_types.RougeSpec( + rouge_type="rouge1" + ), + constants.Metric.ROUGE_2: gapic_evaluation_service_types.RougeSpec( + rouge_type="rouge2" + ), + constants.Metric.ROUGE_L: gapic_evaluation_service_types.RougeSpec( + rouge_type="rougeL" + ), + constants.Metric.ROUGE_L_SUM: gapic_evaluation_service_types.RougeSpec( + rouge_type="rougeLsum" + ), + constants.Metric.TOOL_CALL_VALID: ( + gapic_evaluation_service_types.ToolCallValidSpec() + ), + constants.Metric.TOOL_NAME_MATCH: ( + gapic_evaluation_service_types.ToolNameMatchSpec() + ), + constants.Metric.TOOL_PARAMETER_KV_MATCH: ( + gapic_evaluation_service_types.ToolParameterKVMatchSpec() + ), + constants.Metric.TOOL_PARAMETER_KEY_MATCH: ( + gapic_evaluation_service_types.ToolParameterKeyMatchSpec() + ), + # Model-based Pointwise Metrics. + constants.Metric.FLUENCY: gapic_evaluation_service_types.FluencySpec(), + constants.Metric.COHERENCE: gapic_evaluation_service_types.CoherenceSpec(), + constants.Metric.SAFETY: gapic_evaluation_service_types.SafetySpec(), + constants.Metric.GROUNDEDNESS: (gapic_evaluation_service_types.GroundednessSpec()), + constants.Metric.FULFILLMENT: (gapic_evaluation_service_types.FulfillmentSpec()), + constants.Metric.SUMMARIZATION_QUALITY: ( + gapic_evaluation_service_types.SummarizationQualitySpec() + ), + constants.Metric.SUMMARIZATION_HELPFULNESS: ( + gapic_evaluation_service_types.SummarizationHelpfulnessSpec() + ), + constants.Metric.SUMMARIZATION_VERBOSITY: ( + gapic_evaluation_service_types.SummarizationVerbositySpec() + ), + constants.Metric.QUESTION_ANSWERING_QUALITY: ( + gapic_evaluation_service_types.QuestionAnsweringQualitySpec() + ), + constants.Metric.QUESTION_ANSWERING_RELEVANCE: ( + gapic_evaluation_service_types.QuestionAnsweringRelevanceSpec() + ), + 
constants.Metric.QUESTION_ANSWERING_CORRECTNESS: ( + gapic_evaluation_service_types.QuestionAnsweringCorrectnessSpec( + use_reference=True + ) + ), + constants.Metric.QUESTION_ANSWERING_HELPFULNESS: ( + gapic_evaluation_service_types.QuestionAnsweringHelpfulnessSpec() + ), + # Side-by-side(SxS) Pairwise Metrics. + constants.Metric.PAIRWISE_SUMMARIZATION_QUALITY: ( + gapic_evaluation_service_types.PairwiseSummarizationQualitySpec() + ), + constants.Metric.PAIRWISE_QUESTION_ANSWERING_QUALITY: ( + gapic_evaluation_service_types.PairwiseQuestionAnsweringQualitySpec() + ), +} + + +def build_request( + metric_name: str, + row_dict: Dict[str, Any], + evaluation_run_config: eval_base.EvaluationRunConfig, +) -> gapic_evaluation_service_types.EvaluateInstancesRequest: + """Builds a metric instance and form the request for the evaluation service. + + Args: + metric_name: The name of the metric to evaluate. + row_dict: An eval dataset instance in a dictionary. + evaluation_run_config: Evaluation Run Configurations. + + Returns: + A single EvaluateInstancesRequest. + """ + project = initializer.global_config.project + location = initializer.global_config.location + if not project or not location: + raise ValueError( + "No project or location specified. Please run `vertexai.init()` to" + " provide these parameters." 
+ ) + location_path = ( + gapic_evaluation_services.EvaluationServiceAsyncClient.common_location_path( + project, location + ) + ) + + if metric_name not in _METRIC_NAME_TO_METRIC_SPEC: + raise ValueError(f"Metric name: {metric_name} not supported.") + metric_spec = _METRIC_NAME_TO_METRIC_SPEC[metric_name] + column_map = evaluation_run_config.column_map + prediction = row_dict.get( + column_map.get(constants.Dataset.MODEL_RESPONSE_COLUMN), "" + ) + baseline_prediction = row_dict.get( + column_map.get(constants.Dataset.BASELINE_MODEL_RESPONSE_COLUMN), "" + ) + reference = row_dict.get(column_map.get(constants.Dataset.REFERENCE_COLUMN), "") + context = row_dict.get(column_map.get(constants.Dataset.CONTEXT_COLUMN), "") + instruction = row_dict.get(column_map.get(constants.Dataset.INSTRUCTION_COLUMN), "") + + # Automatic Metrics. + if metric_name == constants.Metric.EXACT_MATCH: + instance = gapic_evaluation_service_types.ExactMatchInput( + metric_spec=metric_spec, + instances=[ + gapic_evaluation_service_types.ExactMatchInstance( + prediction=prediction, + reference=reference, + ) + ], + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + exact_match_input=instance, + ) + if metric_name == constants.Metric.BLEU: + instance = gapic_evaluation_service_types.BleuInput( + metric_spec=metric_spec, + instances=[ + gapic_evaluation_service_types.BleuInstance( + prediction=prediction, + reference=reference, + ) + ], + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + bleu_input=instance, + ) + if metric_name in ( + constants.Metric.ROUGE_1, + constants.Metric.ROUGE_2, + constants.Metric.ROUGE_L, + constants.Metric.ROUGE_L_SUM, + ): + instance = gapic_evaluation_service_types.RougeInput( + metric_spec=metric_spec, + instances=[ + gapic_evaluation_service_types.RougeInstance( + prediction=prediction, + reference=reference, + ) + ], + ) + return 
gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + rouge_input=instance, + ) + if metric_name == constants.Metric.TOOL_CALL_VALID: + instance = gapic_evaluation_service_types.ToolCallValidInput( + metric_spec=metric_spec, + instances=[ + gapic_evaluation_service_types.ToolCallValidInstance( + prediction=prediction, + reference=reference, + ) + ], + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + tool_call_valid_input=instance, + ) + if metric_name == constants.Metric.TOOL_NAME_MATCH: + instance = gapic_evaluation_service_types.ToolNameMatchInput( + metric_spec=metric_spec, + instances=[ + gapic_evaluation_service_types.ToolNameMatchInstance( + prediction=prediction, + reference=reference, + ) + ], + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + tool_name_match_input=instance, + ) + if metric_name == constants.Metric.TOOL_PARAMETER_KEY_MATCH: + instance = gapic_evaluation_service_types.ToolParameterKeyMatchInput( + metric_spec=metric_spec, + instances=[ + gapic_evaluation_service_types.ToolParameterKeyMatchInstance( + prediction=prediction, + reference=reference, + ) + ], + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + tool_parameter_key_match_input=instance, + ) + if metric_name == constants.Metric.TOOL_PARAMETER_KV_MATCH: + instance = gapic_evaluation_service_types.ToolParameterKVMatchInput( + metric_spec=metric_spec, + instances=[ + gapic_evaluation_service_types.ToolParameterKVMatchInstance( + prediction=prediction, + reference=reference, + ) + ], + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + tool_parameter_kv_match_input=instance, + ) + # Model-based Pointwise Metrics. 
+ if metric_name == constants.Metric.COHERENCE: + coherence_input = gapic_evaluation_service_types.CoherenceInput( + metric_spec=metric_spec, + instance=gapic_evaluation_service_types.CoherenceInstance( + prediction=prediction + ), + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + coherence_input=coherence_input, + ) + if metric_name == constants.Metric.FLUENCY: + fluency_input = gapic_evaluation_service_types.FluencyInput( + metric_spec=metric_spec, + instance=gapic_evaluation_service_types.FluencyInstance( + prediction=prediction + ), + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + fluency_input=fluency_input, + ) + if metric_name == constants.Metric.SAFETY: + safety_input = gapic_evaluation_service_types.SafetyInput( + metric_spec=metric_spec, + instance=gapic_evaluation_service_types.SafetyInstance( + prediction=prediction + ), + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + safety_input=safety_input, + ) + if metric_name == constants.Metric.GROUNDEDNESS: + groundedness_input = gapic_evaluation_service_types.GroundednessInput( + metric_spec=metric_spec, + instance=gapic_evaluation_service_types.GroundednessInstance( + prediction=prediction, context=context + ), + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + groundedness_input=groundedness_input, + ) + if metric_name == constants.Metric.FULFILLMENT: + fulfillment_input = gapic_evaluation_service_types.FulfillmentInput( + metric_spec=metric_spec, + instance=gapic_evaluation_service_types.FulfillmentInstance( + prediction=prediction, instruction=instruction + ), + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + fulfillment_input=fulfillment_input, + ) + if metric_name == constants.Metric.RESPONSE_RECALL: + raise NotImplementedError("Response recall is not implemented.") + 
if metric_name == constants.Metric.SUMMARIZATION_QUALITY: + # TODO(b/330807319): allow set reference field after setting metric spec is allowed. + summarization_quality_input = ( + gapic_evaluation_service_types.SummarizationQualityInput( + metric_spec=metric_spec, + instance=gapic_evaluation_service_types.SummarizationQualityInstance( + prediction=prediction, context=context, instruction=instruction + ), + ) + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + summarization_quality_input=summarization_quality_input, + ) + if metric_name == constants.Metric.SUMMARIZATION_HELPFULNESS: + # TODO(b/330807319): allow set reference field after setting metric spec is allowed. + summarization_helpfulness_input = gapic_evaluation_service_types.SummarizationHelpfulnessInput( + metric_spec=metric_spec, + instance=gapic_evaluation_service_types.SummarizationHelpfulnessInstance( + prediction=prediction, context=context, instruction=instruction + ), + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + summarization_helpfulness_input=summarization_helpfulness_input, + ) + if metric_name == constants.Metric.SUMMARIZATION_VERBOSITY: + # TODO(b/330807319): allow set reference field after setting metric spec is allowed. + summarization_verbosity_input = ( + gapic_evaluation_service_types.SummarizationVerbosityInput( + metric_spec=metric_spec, + instance=gapic_evaluation_service_types.SummarizationVerbosityInstance( + prediction=prediction, context=context, instruction=instruction + ), + ) + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + summarization_verbosity_input=summarization_verbosity_input, + ) + if metric_name == constants.Metric.QUESTION_ANSWERING_QUALITY: + # TODO(b/330807319): allow set reference field after setting metric spec is allowed. 
+ question_answering_quality_input = gapic_evaluation_service_types.QuestionAnsweringQualityInput( + metric_spec=metric_spec, + instance=gapic_evaluation_service_types.QuestionAnsweringQualityInstance( + prediction=prediction, context=context, instruction=instruction + ), + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + question_answering_quality_input=question_answering_quality_input, + ) + if metric_name == constants.Metric.QUESTION_ANSWERING_HELPFULNESS: + # TODO(b/330807319): allow set reference field after setting metric spec is allowed. + question_answering_helpfulness_input = gapic_evaluation_service_types.QuestionAnsweringHelpfulnessInput( + metric_spec=metric_spec, + instance=gapic_evaluation_service_types.QuestionAnsweringHelpfulnessInstance( + prediction=prediction, + context=context, + instruction=instruction, + ), + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + question_answering_helpfulness_input=question_answering_helpfulness_input, + ) + if metric_name == constants.Metric.QUESTION_ANSWERING_RELEVANCE: + # TODO(b/330807319): allow set reference field after setting metric spec is allowed. + question_answering_relevance_input = gapic_evaluation_service_types.QuestionAnsweringRelevanceInput( + metric_spec=metric_spec, + instance=gapic_evaluation_service_types.QuestionAnsweringRelevanceInstance( + prediction=prediction, + context=context, + instruction=instruction, + ), + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + question_answering_relevance_input=question_answering_relevance_input, + ) + if metric_name == constants.Metric.QUESTION_ANSWERING_CORRECTNESS: + # TODO(b/330807319): allow set reference field after setting metric spec is allowed. 
+ question_answering_correctness_input = gapic_evaluation_service_types.QuestionAnsweringCorrectnessInput( + metric_spec=metric_spec, + instance=gapic_evaluation_service_types.QuestionAnsweringCorrectnessInstance( + prediction=prediction, + context=context, + instruction=instruction, + reference=reference, + ), + ) + return gapic_evaluation_service_types.EvaluateInstancesRequest( + location=location_path, + question_answering_correctness_input=question_answering_correctness_input, + ) + if metric_name == constants.Metric.RAG_CONTEXT_RECALL: + raise NotImplementedError("RAG context recall is not implemented.") + # Side-by-side(SxS) Pairwise Metrics. + if metric_name == constants.Metric.PAIRWISE_SUMMARIZATION_QUALITY: + raise NotImplementedError("Pairwise summarization quality is not implemented.") + if metric_name == constants.Metric.PAIRWISE_QUESTION_ANSWERING_QUALITY: + raise NotImplementedError( + "Pairwise question answering quality is not implemented." + ) + + +def _parse_autometric_results( + metric_result_dict: Dict[str, Any], +) -> Dict[str, Any]: + """Parses the automatic metric results from the evaluation results. + + Args: + metric_result_dict: The metric results dictionary. + + Returns: + A dictionary containing metric score of the metric. + """ + for value in metric_result_dict.values(): + # Only single instance requests are used by SDK. + return { + constants.MetricResult.SCORE_KEY: value[0].get( + constants.MetricResult.SCORE_KEY + ) + } + + +def _parse_pointwise_results( + metric_result_dict: Dict[str, Any], +) -> Dict[str, Any]: + """Parses the pointwise metric results from the evaluation results. + + Args: + metric_result_dict: The metric results dictionary. + + Returns: + A dictionary containing metric score, explanation, confidence of the + metric. 
+ """ + return { + constants.MetricResult.SCORE_KEY: metric_result_dict.get( + constants.MetricResult.SCORE_KEY + ), + constants.MetricResult.EXPLANATION_KEY: metric_result_dict.get( + constants.MetricResult.EXPLANATION_KEY + ), + constants.MetricResult.CONFIDENCE_KEY: metric_result_dict.get( + constants.MetricResult.CONFIDENCE_KEY + ), + } + + +def _parse_pairwise_results( + metric_result_dict: Dict[str, Any], +) -> Dict[str, Any]: + """Parses the pairwise metric results from the evaluation results. + + s + + Args: + metric_result_dict: The metric results dictionary. + + Returns: + A dictionary containing metric score, explanation, confidence of the + metric. + """ + return { + # TODO(b/330598854): handle pairwise choice. + constants.MetricResult.PAIRWISE_CHOICE_KEY: metric_result_dict.get( + constants.MetricResult.PAIRWISE_CHOICE_KEY, + gapic_evaluation_service_types.PairwiseChoice.PAIRWISE_CHOICE_UNSPECIFIED, + ), + constants.MetricResult.EXPLANATION_KEY: metric_result_dict.get( + constants.MetricResult.EXPLANATION_KEY + ), + constants.MetricResult.CONFIDENCE_KEY: metric_result_dict.get( + constants.MetricResult.CONFIDENCE_KEY + ), + } + + +def _handle_response( + response: gapic_evaluation_service_types.EvaluateInstancesResponse, +) -> Dict[str, Any]: + """Handles the response from the evaluation service. + + Args: + response: The response from the evaluation service. + + Returns: + The metric score of the evaluation. + """ + metric_type = response._pb.WhichOneof("evaluation_results") + + # Automatic Metrics. 
+ if metric_type == constants.MetricResult.EXACT_MATCH_RESULTS: + metric_result = response.exact_match_results + elif metric_type == constants.MetricResult.BLEU_RESULTS: + metric_result = response.bleu_results + elif metric_type == constants.MetricResult.ROUGE_RESULTS: + metric_result = response.rouge_results + elif metric_type == constants.MetricResult.TOOL_CALL_VALID_RESULTS: + metric_result = response.tool_call_valid_results + elif metric_type == constants.MetricResult.TOOL_NAME_MATCH_RESULTS: + metric_result = response.tool_name_match_results + elif metric_type == constants.MetricResult.TOOL_PARAMETER_KEY_MATCH_RESULTS: + metric_result = response.tool_parameter_key_match_results + elif metric_type == constants.MetricResult.TOOL_PARAMETER_KV_MATCH_RESULTS: + metric_result = response.tool_parameter_kv_match_results + # Model-based Pointwise Metrics. + elif metric_type == constants.MetricResult.COHERENCE_RESULT: + metric_result = response.coherence_result + elif metric_type == constants.MetricResult.FULFILLMENT_RESULT: + metric_result = response.fulfillment_result + elif metric_type == constants.MetricResult.FLUENCY_RESULT: + metric_result = response.fluency_result + elif metric_type == constants.MetricResult.SAFETY_RESULT: + metric_result = response.safety_result + elif metric_type == constants.MetricResult.GROUNDEDNESS_RESULT: + metric_result = response.groundedness_result + elif metric_type == constants.MetricResult.RESPONSE_RECALL_RESULT: + metric_result = response.response_recall_result + elif metric_type == constants.MetricResult.SUMMARIZATION_QUALITY_RESULT: + metric_result = response.summarization_quality_result + elif metric_type == constants.MetricResult.SUMMARIZATION_HELPFULNESS_RESULT: + metric_result = response.summarization_helpfulness_result + elif metric_type == constants.MetricResult.SUMMARIZATION_VERBOSITY_RESULT: + metric_result = response.summarization_verbosity_result + elif metric_type == 
constants.MetricResult.QUESTION_ANSWERING_QUALITY_RESULT: + metric_result = response.question_answering_quality_result + elif metric_type == constants.MetricResult.QUESTION_ANSWERING_RELEVANCE_RESULT: + metric_result = response.question_answering_relevance_result + elif metric_type == constants.MetricResult.QUESTION_ANSWERING_HELPFULNESS_RESULT: + metric_result = response.question_answering_helpfulness_result + elif metric_type == constants.MetricResult.QUESTION_ANSWERING_CORRECTNESS_RESULT: + metric_result = response.question_answering_correctness_result + elif metric_type == constants.MetricResult.RAG_CONTEXT_RECALL_RESULT: + metric_result = response.rag_context_recall_result + # Side-by-side(SxS) Pairwise Metrics. + elif metric_type == constants.MetricResult.PAIRWISE_SUMMARIZATION_QUALITY_RESULT: + metric_result = response.pairwise_summarization_quality_result + elif ( + metric_type == constants.MetricResult.PAIRWISE_QUESTION_ANSWERING_QUALITY_RESULT + ): + metric_result = response.pairwise_question_answering_quality_result + else: + raise ValueError(f"Unknown metric type: {metric_type}") + + metric_result_dict = json_format.MessageToDict( + metric_result._pb, preserving_proto_field_name=True + ) + + if metric_type in constants.MetricResult.AUTOMATIC_METRIC_RESULTS_LIST: + result = _parse_autometric_results(metric_result_dict) + elif metric_type in constants.MetricResult.MODEL_BASED_METRIC_RESULT_LIST: + result = _parse_pointwise_results(metric_result_dict) + elif metric_type in constants.MetricResult.PAIRWISE_METRIC_RESULT_LIST: + result = _parse_pairwise_results(metric_result_dict) + else: + raise ValueError(f"Unknown metric type: {metric_type}") + return result + + +async def evaluate_instances_async( + client: gapic_evaluation_services.EvaluationServiceAsyncClient, + request: gapic_evaluation_service_types.EvaluateInstancesRequest, +): + """Evaluates an instance asynchronously. + + Args: + client: The client to use for evaluation. 
+ request: An EvaluateInstancesRequest. + + Returns: + The metric score of the evaluation. + """ + + response = await client.evaluate_instances( + request=request, + retry=api_core.retry_async.AsyncRetry( + initial=0.250, + maximum=90.0, + multiplier=1.45, + deadline=600.0, + predicate=api_core.retry.if_exception_type( + api_core.exceptions.Aborted, + api_core.exceptions.DeadlineExceeded, + api_core.exceptions.InternalServerError, + api_core.exceptions.ResourceExhausted, + api_core.exceptions.ServiceUnavailable, + api_core.exceptions.Unknown, + api_core.exceptions.Cancelled, + ), + ), + ) + return _handle_response(response) diff --git a/vertexai/preview/evaluation/prompt_template.py b/vertexai/preview/evaluation/prompt_template.py new file mode 100644 index 0000000000..14b0f6bd6a --- /dev/null +++ b/vertexai/preview/evaluation/prompt_template.py @@ -0,0 +1,84 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import string +from typing import Set + + +class PromptTemplate: + """A prompt template for creating prompts with placeholders. + + The `PromptTemplate` class allows users to define a template string with + placeholders represented in curly braces `{placeholder}`. The placeholder + names cannot contain spaces. These placeholders can be replaced with specific + values using the `assemble` method, providing flexibility in generating + dynamic prompts. 
+ + Example Usage: + + ``` + template_str = "Hello, {name}! Today is {day}. How are you?" + prompt_template = PromptTemplate(template_str) + completed_prompt = prompt_template.assemble(name="John", day="Monday") + print(completed_prompt) + ``` + + Attributes: + template: The template string containing placeholders for replacement. + placeholders: A set of placeholder names from the template string. + """ + + def __init__(self, template: str): + """Initializes the PromptTemplate with a given template. + + Args: + template: The template string with placeholders. Placeholders should be + represented in curly braces `{placeholder}`. + """ + self.template = str(template) + self.placeholders = self._get_placeholders() + + def _get_placeholders(self) -> Set[str]: + """Extracts and return a set of placeholder names from the template.""" + return set( + field_name + for _, field_name, _, _ in string.Formatter().parse(self.template) + if field_name is not None + ) + + def assemble(self, **kwargs) -> "PromptTemplate": + """Replaces only the provided placeholders in the template with specific values. + + Args: + **kwargs: Keyword arguments where keys are placeholder names and values + are the replacements. + + Returns: + A new PromptTemplate instance with the updated template string. 
+ """ + replaced_values = { + key: kwargs.get(key, "{" + key + "}") for key in self.placeholders + } + new_template = self.template.format(**replaced_values) + return PromptTemplate(new_template) + + def __str__(self) -> str: + """Returns the template string.""" + return self.template + + def __repr__(self) -> str: + """Returns a string representation of the PromptTemplate.""" + return f"PromptTemplate('{self.template}')" diff --git a/vertexai/preview/evaluation/utils.py b/vertexai/preview/evaluation/utils.py new file mode 100644 index 0000000000..c85caba690 --- /dev/null +++ b/vertexai/preview/evaluation/utils.py @@ -0,0 +1,176 @@ +# -*- coding: utf-8 -*- + +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +import io +import os +from typing import Any, Dict, Optional, Union, TYPE_CHECKING + +from google.cloud import bigquery +from google.cloud import storage +from google.cloud.aiplatform import compat +from google.cloud.aiplatform import initializer +from google.cloud.aiplatform import utils +from google.cloud.aiplatform_v1beta1.services import ( + evaluation_service as gapic_evaluation_services, +) + +if TYPE_CHECKING: + import pandas as pd + +_BQ_PREFIX = "bq://" +_GCS_PREFIX = "gs://" + + +class _EvaluationServiceAsyncClientWithOverride(utils.ClientWithOverride): + _is_temporary = False + _default_version = compat.V1BETA1 + _version_map = ( + ( + compat.V1BETA1, + gapic_evaluation_services.EvaluationServiceAsyncClient, + ), + ) + + +def create_evaluation_service_async_client( + api_base_path_override: Optional[str] = None, +) -> _EvaluationServiceAsyncClientWithOverride: + """Creates an aync client for the evaluation service. + + Args: + api_base_path_override: Optional. Override default api base path. + + Returns: + Instantiated Vertex AI EvaluationService async client with optional overrides. + """ + return initializer.global_config.create_client( + client_class=_EvaluationServiceAsyncClientWithOverride, + location_override=initializer.global_config.location, + api_base_path_override=api_base_path_override, + ) + + +def load_dataset(source: Union[str, "pd.DataFrame", Dict[str, Any]]) -> "pd.DataFrame": + """Loads dataset from various sources into a DataFrame. + + Args: + source: The data source. Can be the following formats: + - pd.DataFrame: Used directly for evaluation. + - dict: Converted to a pandas DataFrame before evaluation. + - str: Interpreted as a file path or URI. Supported formats include: + * Local JSONL or CSV files: Loaded from the local filesystem. + * GCS JSONL or CSV files: Loaded from Google Cloud Storage + (e.g., 'gs://bucket/data.csv'). 
+ * BigQuery table URI: Loaded from Google Cloud BigQuery + (e.g., 'bq://project-id.dataset.table_name'). + + Returns: + The dataset in pandas DataFrame format. + """ + try: + import pandas as pd + except ImportError: + raise ImportError( + 'Pandas is not installed. Please install the SDK using "pip install' + ' google-cloud-aiplatform[rapid_evaluation]"' + ) + + if isinstance(source, pd.DataFrame): + return source.copy() + elif isinstance(source, dict): + return pd.DataFrame(source) + elif isinstance(source, str): + if source.startswith(_BQ_PREFIX): + return _load_bigquery(source[len(_BQ_PREFIX) :]) + + _, extension = os.path.splitext(source) + file_type = extension.lower()[1:] + + if file_type == "jsonl": + return _load_jsonl(source) + elif file_type == "csv": + return _load_csv(source) + else: + raise ValueError(f"Unsupported file type: {file_type}") + else: + raise TypeError( + "Unsupported dataset type. Must be DataFrame, dictionary, or" " filepath." + ) + + +def _load_jsonl(filepath: str) -> "pd.DataFrame": + """Loads data from a JSONL file into a DataFrame.""" + try: + import pandas as pd + except ImportError: + raise ImportError( + 'Pandas is not installed. Please install the SDK using "pip install' + ' google-cloud-aiplatform[rapid_evaluation]"' + ) + if filepath.startswith(_GCS_PREFIX): + file_contents = _read_gcs_file_contents(filepath) + return pd.read_json(file_contents, lines=True) + else: + with open(filepath, "r") as f: + return pd.read_json(f, lines=True) + + +def _load_csv(filepath: str) -> "pd.DataFrame": + """Loads data from a CSV file into a DataFrame.""" + try: + import pandas as pd + except ImportError: + raise ImportError( + 'Pandas is not installed. 
Please install the SDK using "pip install' + ' google-cloud-aiplatform[rapid_evaluation]"' + ) + if filepath.startswith(_GCS_PREFIX): + file_contents = _read_gcs_file_contents(filepath) + return pd.read_csv(io.StringIO(file_contents), encoding="utf-8") + else: + return pd.read_csv(filepath, encoding="utf-8") + + +def _load_bigquery(table_id: str) -> "pd.DataFrame": + """Loads data from a BigQuery table into a DataFrame.""" + + client = bigquery.Client( + project=initializer.global_config.project, + credentials=initializer.global_config.credentials, + ) + table = client.get_table(table_id) + return client.list_rows(table).to_dataframe() + + +def _read_gcs_file_contents(filepath: str) -> str: + """Reads the contents of a file from Google Cloud Storage. + + Args: + filepath: The GCS file path (e.g., 'gs://bucket_name/file.csv') + + Returns: + The contents of the file. + """ + + client = storage.Client( + project=initializer.global_config.project, + credentials=initializer.global_config.credentials, + ) + bucket_name, blob_path = filepath[len(_GCS_PREFIX) :].split("/", 1) + bucket = client.get_bucket(bucket_name) + blob = bucket.blob(blob_path) + return blob.download_as_string().decode("utf-8") diff --git a/vertexai/preview/extensions.py b/vertexai/preview/extensions.py new file mode 100644 index 0000000000..daf8f759dd --- /dev/null +++ b/vertexai/preview/extensions.py @@ -0,0 +1,25 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +"""Classes for working with extensions.""" + +# We just want to re-export certain classes +# pylint: disable=g-multiple-import,g-importing-member +from vertexai.extensions._extensions import ( + Extension, +) + +__all__ = ( + "Extension", +) diff --git a/vertexai/preview/generative_models.py b/vertexai/preview/generative_models.py index 920e1f554d..e211be816d 100644 --- a/vertexai/preview/generative_models.py +++ b/vertexai/preview/generative_models.py @@ -34,7 +34,9 @@ Part, ResponseBlockedError, ResponseValidationError, + SafetySetting, Tool, + ToolConfig, ) @@ -64,6 +66,8 @@ class ChatSession(_PreviewChatSession): "Part", "ResponseBlockedError", "ResponseValidationError", + "SafetySetting", "Tool", + "ToolConfig", # ] diff --git a/vertexai/preview/reasoning_engines/__init__.py b/vertexai/preview/reasoning_engines/__init__.py new file mode 100644 index 0000000000..e6889025c1 --- /dev/null +++ b/vertexai/preview/reasoning_engines/__init__.py @@ -0,0 +1,31 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +"""Classes for working with reasoning engines.""" + +# We just want to re-export certain classes +# pylint: disable=g-multiple-import,g-importing-member +from vertexai.reasoning_engines._reasoning_engines import ( + Queryable, + ReasoningEngine, +) +from vertexai.preview.reasoning_engines.templates.langchain import ( + LangchainAgent +) + +__all__ = ( + "LangchainAgent", + "Queryable", + "ReasoningEngine", +) diff --git a/vertexai/preview/reasoning_engines/templates/langchain.py b/vertexai/preview/reasoning_engines/templates/langchain.py new file mode 100644 index 0000000000..847f390b47 --- /dev/null +++ b/vertexai/preview/reasoning_engines/templates/langchain.py @@ -0,0 +1,406 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import json +from typing import ( + TYPE_CHECKING, + Any, + Callable, + List, + Mapping, + Optional, + Sequence, + Tuple, + Union, +) + +if TYPE_CHECKING: + try: + from langchain_core import runnables + from langchain_core import tools as lc_tools + + BaseTool = lc_tools.BaseTool + GetSessionHistoryCallable = runnables.history.GetSessionHistoryCallable + RunnableConfig = runnables.RunnableConfig + RunnableSerializable = runnables.RunnableSerializable + except ImportError: + BaseTool = Any + GetSessionHistoryCallable = Any + RunnableConfig = Any + RunnableSerializable = Any + + +def _default_runnable_kwargs(has_history: bool) -> Mapping[str, Any]: + # https://github.com/langchain-ai/langchain/blob/5784dfed001730530637793bea1795d9d5a7c244/libs/core/langchain_core/runnables/history.py#L237-L241 + runnable_kwargs = { + # input_messages_key (str): Must be specified if the underlying + # agent accepts a dict as input. + "input_messages_key": "input", + # output_messages_key (str): Must be specified if the underlying + # agent returns a dict as output. + "output_messages_key": "output", + } + if has_history: + # history_messages_key (str): Must be specified if the underlying + # agent accepts a dict as input and a separate key for historical + # messages. 
+ runnable_kwargs["history_messages_key"] = "history" + return runnable_kwargs + + +def _default_output_parser(): + from langchain_core import agents + from langchain_core import output_parsers + from langchain_core import outputs + + class DefaultOutputParser(output_parsers.BaseOutputParser): + + def parse_result( + self, + result: List[outputs.Generation], + ) -> Union[agents.AgentAction, agents.AgentFinish]: + if not isinstance(result[0], outputs.ChatGeneration): + raise ValueError( + "This output parser only works on ChatGeneration output" + ) + msg = result[0].message + content = msg.content + function_call = msg.additional_kwargs.get("function_call", {}) + if function_call: + function_name = function_call["name"] + tool_input = json.loads(function_call.get("arguments", {})) + content_msg = f"responded: {content}\n" if content else "\n" + log_msg = ( + f"\nInvoking: `{function_name}` with `{tool_input}`\n" + f"{content_msg}\n" + ) + return agents.AgentActionMessageLog( + tool=function_name, + tool_input=tool_input, + log=log_msg, + message_log=[msg], + ) + return agents.AgentFinish( + return_values={"output": content}, + log=str(content), + ) + + def parse( + self, + text: str, + ) -> Union[agents.AgentAction, agents.AgentFinish]: + raise ValueError("Can only parse messages") + + return DefaultOutputParser() + + +def _default_prompt(has_history: bool) -> "RunnableSerializable": + from langchain_core import agents + from langchain_core import messages + from langchain_core import prompts + + def _convert_agent_action_to_messages( + agent_action: agents.AgentAction, observation: str + ) -> List[messages.BaseMessage]: + """Convert an agent action to a message. + + This is used to reconstruct the original message from the agent action. + + Args: + agent_action (AgentAction): The action to convert into messages. + observation (str): The observation to convert into messages. 
+ + Returns: + List[messages.BaseMessage]: A list of messages that corresponds to + the original tool invocation. + """ + if isinstance(agent_action, agents.AgentActionMessageLog): + return list(agent_action.message_log) + [ + _create_function_message(agent_action, observation) + ] + else: + return [messages.AIMessage(content=agent_action.log)] + + def _create_function_message( + agent_action: agents.AgentAction, observation: str + ) -> messages.FunctionMessage: + """Convert agent action and observation into a function message. + + Args: + agent_action (AgentAction): tool invocation request from the agent. + observation (str): the result of the tool invocation. + + Returns: + FunctionMessage: A message corresponding to the tool invocation. + """ + if not isinstance(observation, str): + try: + content = json.dumps(observation, ensure_ascii=False) + except Exception: + content = str(observation) + else: + content = observation + return messages.FunctionMessage(name=agent_action.tool, content=content) + + def _format_to_messages( + intermediate_steps: Sequence[Tuple[agents.AgentAction, str]], + ) -> List[messages.BaseMessage]: + """Convert (AgentAction, tool output) tuples into messages. + + Args: + intermediate_steps (Sequence[Tuple[AgentAction, str]]): + Required. Steps the model has taken, along with observations. + + Returns: + List[langchain_core.messages.BaseMessage]: list of messages to send + to the model for the next generation. 
+ + """ + scratchpad_messages = [] + for agent_action, observation in intermediate_steps: + scratchpad_messages.extend( + _convert_agent_action_to_messages(agent_action, observation) + ) + return scratchpad_messages + + if has_history: + return { + "history": lambda x: x["history"], + "input": lambda x: x["input"], + "agent_scratchpad": ( + lambda x: _format_to_messages(x["intermediate_steps"]) + ), + } | prompts.ChatPromptTemplate.from_messages([ + prompts.MessagesPlaceholder(variable_name="history"), + ("user", "{input}"), + prompts.MessagesPlaceholder(variable_name="agent_scratchpad"), + ]) + else: + return { + "input": lambda x: x["input"], + "agent_scratchpad": ( + lambda x: _format_to_messages(x["intermediate_steps"]) + ), + } | prompts.ChatPromptTemplate.from_messages([ + ("user", "{input}"), + prompts.MessagesPlaceholder(variable_name="agent_scratchpad"), + ]) + + +class LangchainAgent: + """A Langchain Agent. + + Reference: + * Agent: https://python.langchain.com/docs/modules/agents/concepts + * Memory: https://python.langchain.com/docs/expression_language/how_to/message_history + """ + + def __init__( + self, + model: str, + *, + prompt: Optional["RunnableSerializable"] = None, + tools: Optional[Sequence[Union[Callable, "BaseTool"]]] = None, + output_parser: Optional["RunnableSerializable"] = None, + chat_history: Optional["GetSessionHistoryCallable"] = None, + model_kwargs: Optional[Mapping[str, Any]] = None, + agent_executor_kwargs: Optional[Mapping[str, Any]] = None, + runnable_kwargs: Optional[Mapping[str, Any]] = None, + ): + """Initializes the LangchainAgent. 
+ + Under-the-hood, assuming .set_up() is called, this will correspond to + + ``` + from langchain import agents + from langchain_core.runnables.history import RunnableWithMessageHistory + from langchain_google_vertexai import ChatVertexAI + + llm = ChatVertexAI(model_name=model, **model_kwargs) + agent_executor = agents.AgentExecutor( + agent=prompt | llm.bind(functions=tools) | output_parser, + tools=tools, + **agent_executor_kwargs, + ) + runnable = RunnableWithMessageHistory( + runnable=agent_executor, + get_session_history=chat_history, + **runnable_kwargs, + ) + ``` + + Args: + model (str): + Optional. The name of the model (e.g. "gemini-1.0-pro"). + prompt (langchain_core.runnables.RunnableSerializable): + Optional. The prompt template for the model. Defaults to a + ChatPromptTemplate. + tools (Sequence[langchain_core.tools.BaseTool, Callable]): + Optional. The tools for the agent to be able to use. All input + callables (e.g. function or class method) will be converted + to a langchain.tools.base.StructuredTool. Defaults to None. + output_parser (langchain_core.runnables.RunnableSerializable): + Optional. The output parser for the model. Defaults to an + output parser that works with Gemini function-calling. + chat_history (langchain_core.runnables.history.GetSessionHistoryCallable): + Optional. Callable that returns a new BaseChatMessageHistory. + Defaults to None, i.e. chat_history is not preserved. + model_kwargs (Mapping[str, Any]): + Optional. Additional keyword arguments for the constructor of + chat_models.ChatVertexAI. An example would be + ``` + { + # temperature (float): Sampling temperature, it controls the + # degree of randomness in token selection. + "temperature": 0.28, + # max_output_tokens (int): Token limit determines the + # maximum amount of text output from one prompt. + "max_output_tokens": 1000, + # top_p (float): Tokens are selected from most probable to + # least, until the sum of their probabilities equals the + # top_p value. 
+ "top_p": 0.95, + # top_k (int): How the model selects tokens for output, the + # next token is selected from among the top_k most probable + # tokens. + "top_k": 40, + } + ``` + agent_executor_kwargs (Mapping[str, Any]): + Optional. Additional keyword arguments for the constructor of + langchain.agents.AgentExecutor. An example would be + ``` + { + # Whether to return the agent's trajectory of intermediate + # steps at the end in addition to the final output. + "return_intermediate_steps": False, + # The maximum number of steps to take before ending the + # execution loop. + "max_iterations": 15, + # The method to use for early stopping if the agent never + # returns `AgentFinish`. Either 'force' or 'generate'. + "early_stopping_method": "force", + # How to handle errors raised by the agent's output parser. + # Defaults to `False`, which raises the error. + "handle_parsing_errors": False, + } + ``` + runnable_kwargs (Mapping[str, Any]): + Optional. Additional keyword arguments for the constructor of + langchain.runnables.history.RunnableWithMessageHistory if + chat_history is specified. If chat_history is None, this will be + ignored. + """ + from google.cloud.aiplatform import initializer + self._project = initializer.global_config.project + self._location = initializer.global_config.location + self._tools = [] + if tools: + from langchain_core import tools as lc_tools + from langchain.tools.base import StructuredTool + self._tools = [ + tool if isinstance(tool, lc_tools.BaseTool) + else StructuredTool.from_function(tool) + for tool in tools + ] + self._model_name = model + self._prompt = prompt + self._output_parser = output_parser + self._chat_history = chat_history + self._model_kwargs = model_kwargs + self._agent_executor_kwargs = agent_executor_kwargs + self._runnable_kwargs = runnable_kwargs + self._runnable = None + self._chat_history_store = None + + def set_up(self): + """Sets up the agent for execution of queries at runtime. 
+ + It initializes the model, binds the model with tools, and connects it + with the prompt template and output parser. + + This method should not be called for an object that being passed to + the ReasoningEngine service for deployment, as it initializes clients + that can not be serialized. + """ + from langchain.agents import AgentExecutor + from langchain_core.runnables.history import RunnableWithMessageHistory + from langchain_google_vertexai import ChatVertexAI + import vertexai + from google.cloud.aiplatform import initializer + + has_history = self._chat_history is not None + self._prompt = self._prompt or _default_prompt(has_history) + self._output_parser = self._output_parser or _default_output_parser() + self._model_kwargs = self._model_kwargs or {} + self._agent_executor_kwargs = self._agent_executor_kwargs or {} + self._runnable_kwargs = ( + self._runnable_kwargs or _default_runnable_kwargs(has_history) + ) + + current_project = initializer.global_config.project + current_location = initializer.global_config.location + vertexai.init(project=self._project, location=self._location) + self._llm = ChatVertexAI( + model_name=self._model_name, + # https://github.com/langchain-ai/langchain/issues/14700 + convert_system_message_to_human=True, + **self._model_kwargs, + ) + vertexai.init(project=current_project, location=current_location) + + if self._tools: + self._llm = self._llm.bind(functions=self._tools) + self._agent = self._prompt | self._llm | self._output_parser + self._agent_executor = AgentExecutor( + agent=self._agent, + tools=self._tools, + **self._agent_executor_kwargs, + ) + runnable = self._agent_executor + if has_history: + runnable = RunnableWithMessageHistory( + runnable=self._agent_executor, + get_session_history=self._chat_history, + **self._runnable_kwargs, + ) + self._runnable = runnable + + def query( + self, + *, + input: Union[str, Mapping[str, Any]], + config: Optional["RunnableConfig"] = None, + **kwargs: Any, + ) -> Mapping[str, 
Any]: + """Queries the Agent with the given input and config. + + Args: + input (Union[str, Mapping[str, Any]]): + Required. The input to be passed to the Agent. + config (langchain_core.runnables.RunnableConfig): + Optional. The config (if any) to be used for invoking the Agent. + **kwargs: + Optional. Any additional keyword arguments to be passed to the + `.invoke()` method of the corresponding AgentExecutor. + + Returns: + The output of querying the Agent with the given input and config. + """ + if isinstance(input, str): + input = {"input": input} + if not self._runnable: + self.set_up() + return self._runnable.invoke(input=input, config=config, **kwargs) diff --git a/vertexai/preview/tuning/__init__.py b/vertexai/preview/tuning/__init__.py new file mode 100644 index 0000000000..9b8362969b --- /dev/null +++ b/vertexai/preview/tuning/__init__.py @@ -0,0 +1,23 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +"""Classes for tuning models.""" + +# We just want to re-export certain classes +# pylint: disable=g-multiple-import,g-importing-member +from vertexai.tuning._tuning import TuningJob + +__all__ = [ + "TuningJob", +] diff --git a/vertexai/preview/tuning/sft.py b/vertexai/preview/tuning/sft.py new file mode 100644 index 0000000000..3580a4f6e2 --- /dev/null +++ b/vertexai/preview/tuning/sft.py @@ -0,0 +1,27 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +"""Classes for supervised tuning.""" + +# We just want to re-export certain classes +# pylint: disable=g-multiple-import,g-importing-member +from vertexai.tuning._supervised_tuning import ( + train, + SupervisedTuningJob, +) + +__all__ = [ + "train", + "SupervisedTuningJob", +] diff --git a/vertexai/reasoning_engines/_reasoning_engines.py b/vertexai/reasoning_engines/_reasoning_engines.py new file mode 100644 index 0000000000..751fac22fc --- /dev/null +++ b/vertexai/reasoning_engines/_reasoning_engines.py @@ -0,0 +1,377 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +import inspect +import io +import os +import sys +import tarfile +import typing +from typing import Optional, Protocol, Sequence + +from google.cloud.aiplatform import base +from google.cloud.aiplatform import initializer +from google.cloud.aiplatform import utils as aip_utils +from google.cloud.aiplatform_v1beta1 import types +from vertexai.reasoning_engines import _utils + + +_LOGGER = base.Logger(__name__) +_SUPPORTED_PYTHON_VERSIONS = ("3.8", "3.9", "3.10", "3.11") +_DEFAULT_GCS_DIR_NAME = "reasoning_engine" +_BLOB_FILENAME = "reasoning_engine.pkl" +_REQUIREMENTS_FILE = "requirements.txt" +_EXTRA_PACKAGES_FILE = "dependencies.tar.gz" + + +@typing.runtime_checkable +class Queryable(Protocol): + """Protocol for Reasoning Engine applications that can be queried.""" + + @abc.abstractmethod + def query(self, **kwargs): + """Runs the Reasoning Engine to serve the user query.""" + + +class ReasoningEngine(base.VertexAiResourceNounWithFutureManager, Queryable): + """Represents a Vertex AI Reasoning Engine resource.""" + client_class = aip_utils.ReasoningEngineClientWithOverride + _resource_noun = "reasoning_engine" + _getter_method = "get_reasoning_engine" + _list_method = "list_reasoning_engines" + _delete_method = "delete_reasoning_engine" + _parse_resource_name_method = "parse_reasoning_engine_path" + _format_resource_name_method = "reasoning_engine_path" + + def __init__(self, reasoning_engine_name: str): + """Retrieves a Reasoning Engine resource. + + Args: + reasoning_engine_name (str): + Required. 
A fully-qualified resource name or ID such as + "projects/123/locations/us-central1/reasoningEngines/456" or + "456" when project and location are initialized or passed. + """ + super().__init__(resource_name=reasoning_engine_name) + self.execution_api_client = initializer.global_config.create_client( + client_class=aip_utils.ReasoningEngineExecutionClientWithOverride, + ) + self._gca_resource = self._get_gca_resource( + resource_name=reasoning_engine_name + ) + self._operation_schemas = None + + @property + def resource_name(self) -> str: + """Fully-qualified resource name.""" + return self._gca_resource.name + + @classmethod + def create( + cls, + reasoning_engine: Queryable, + *, + requirements: Optional[Sequence[str]] = None, + reasoning_engine_name: Optional[str] = None, + display_name: Optional[str] = None, + description: Optional[str] = None, + gcs_dir_name: str = _DEFAULT_GCS_DIR_NAME, + sys_version: Optional[str] = None, + extra_packages: Optional[Sequence[str]] = None, + ) -> "ReasoningEngine": + """Creates a new ReasoningEngine. + + The Reasoning Engine will be an instance of the `reasoning_engine` that + was passed in, running remotely on Vertex AI. + + Sample ``src_dir`` contents (e.g. ``./user_src_dir``): + + .. code-block:: python + + user_src_dir/ + |-- main.py + |-- requirements.txt + |-- user_code/ + | |-- utils.py + | |-- ... + |-- ... + + To build a Reasoning Engine: + + .. code-block:: python + + remote_app = ReasoningEngine.create( + local_app, + requirements=[ + # I.e. the PyPI dependencies listed in requirements.txt + "google-cloud-aiplatform==1.25.0", + "langchain==0.0.242", + ... + ], + extra_packages=[ + "./user_src_dir/main.py", # a single file + "./user_src_dir/user_code", # a directory + ... + ], + ) + + Args: + reasoning_engine (ReasoningEngineInterface): + Required. The Reasoning Engine to be created. + requirements (Sequence[str]): + Optional. The set of PyPI dependencies needed. + reasoning_engine_name (str): + Optional. 
A fully-qualified resource name or ID such as + "projects/123/locations/us-central1/reasoningEngines/456" or + "456" when project and location are initialized or passed. If + specifying the ID, it should be 4-63 characters. Valid + characters are lowercase letters, numbers and hyphens ("-"), + and it should start with a number or a lower-case letter. If not + provided, Vertex AI will generate a value for this ID. + display_name (str): + Optional. The user-defined name of the Reasoning Engine. + The name can be up to 128 characters long and can comprise any + UTF-8 character. + description (str): + Optional. The description of the Reasoning Engine. + gcs_dir_name (CreateReasoningEngineOptions): + Optional. The GCS bucket directory under `staging_bucket` to + use for staging the artifacts needed. + sys_version (str): + Optional. The Python system version used. Currently supports any + of "3.8", "3.9", "3.10", "3.11". If not specified, it defaults + to the "{major}.{minor}" attributes of sys.version_info. + extra_packages (Sequence[str]): + Optional. The set of extra user-provided packages (if any). + + Returns: + ReasoningEngine: The Reasoning Engine that was created. + + Raises: + ValueError: If `sys.version` is not supported by ReasoningEngine. + ValueError: If the project was not set using vertexai.init. + ValueError: If the location was not set using vertexai.init. + ValueError: If the staging_bucket was not set using vertexai.init. + ValueError: If the staging_bucket does not start with "gs://". + """ + if not sys_version: + sys_version = f"{sys.version_info.major}.{sys.version_info.minor}" + if sys_version not in _SUPPORTED_PYTHON_VERSIONS: + raise ValueError( + f"Unsupported python version: {sys_version}. ReasoningEngine " + f"only supports {_SUPPORTED_PYTHON_VERSIONS} at the moment." 
+ ) + sdk_resource = cls.__new__(cls) + base.VertexAiResourceNounWithFutureManager.__init__( + sdk_resource, + resource_name=reasoning_engine_name, + ) + staging_bucket = initializer.global_config.staging_bucket + if not staging_bucket: + raise ValueError( + "Please provide a `staging_bucket` in `vertexai.init(...)`" + ) + if not staging_bucket.startswith("gs://"): + raise ValueError(f"{staging_bucket=} must start with `gs://`") + if not ( + hasattr(reasoning_engine, "query") + and callable(reasoning_engine.query) + ): + raise TypeError( + "reasoning_engine does not have a callable method named `query`" + ) + try: + inspect.signature(reasoning_engine.query) + except ValueError as err: + raise ValueError( + "Invalid query signature. This might be due to a missing " + "`self` argument in the reasoning_engine.query method." + ) from err + requirements = requirements or [] + extra_packages = extra_packages or [] + # Prepares the Reasoning Engine for creation in Vertex AI. + # This involves packaging and uploading the artifacts for + # reasoning_engine, requirements and extra_packages to + # `staging_bucket/gcs_dir_name`. 
+ _prepare( + reasoning_engine=reasoning_engine, + requirements=requirements, + project=sdk_resource.project, + location=sdk_resource.location, + staging_bucket=staging_bucket, + gcs_dir_name=gcs_dir_name, + extra_packages=extra_packages, + ) + package_spec = types.ReasoningEngineSpec.PackageSpec( + python_version=sys_version, + pickle_object_gcs_uri="{}/{}/{}".format( + staging_bucket, + gcs_dir_name, + _BLOB_FILENAME, + ), + dependency_files_gcs_uri="{}/{}/{}".format( + staging_bucket, + gcs_dir_name, + _EXTRA_PACKAGES_FILE, + ), + ) + if requirements: + package_spec.requirements_gcs_uri = "{}/{}/{}".format( + staging_bucket, + gcs_dir_name, + _REQUIREMENTS_FILE, + ) + reasoning_engine_spec = types.ReasoningEngineSpec( + package_spec=package_spec, + ) + try: + schema_dict = _utils.generate_schema( + reasoning_engine.query, + schema_name=f"{type(reasoning_engine).__name__}_query", + ) + # Note: we append the schema post-initialization to avoid upstream + # issues in marshaling the data that would result in errors like: + # ../../../../../proto/marshal/rules/struct.py:140: in to_proto + # self._marshal.to_proto(struct_pb2.Value, v) for k, v in value.items() + # E AttributeError: 'list' object has no attribute 'items' + reasoning_engine_spec.class_methods.append(_utils.to_proto(schema_dict)) + except Exception as e: + _LOGGER.warning(f"failed to generate schema: {e}") + operation_future = sdk_resource.api_client.create_reasoning_engine( + parent=initializer.global_config.common_location_path( + project=sdk_resource.project, location=sdk_resource.location + ), + reasoning_engine=types.ReasoningEngine( + name=reasoning_engine_name, + display_name=display_name, + description=description, + spec=reasoning_engine_spec, + ), + ) + _LOGGER.log_create_with_lro(cls, operation_future) + created_resource = operation_future.result() + _LOGGER.log_create_complete( + cls, + created_resource, + cls._resource_noun, + module_name="vertexai.preview.reasoning_engines", + ) + # We use 
`._get_gca_resource(...)` instead of `created_resource` to + # fully instantiate the attributes of the reasoning engine. + sdk_resource._gca_resource = sdk_resource._get_gca_resource( + resource_name=created_resource.name + ) + sdk_resource.execution_api_client = ( + initializer.global_config.create_client( + client_class=aip_utils.ReasoningEngineExecutionClientWithOverride, + credentials=sdk_resource.credentials, + location_override=sdk_resource.location, + ) + ) + sdk_resource._operation_schemas = None + return sdk_resource + + def operation_schemas(self) -> Sequence[_utils.JsonDict]: + """Returns the (Open)API schemas for the Reasoning Engine.""" + spec = _utils.to_dict(self._gca_resource.spec) + if self._operation_schemas is None: + self._operation_schemas = spec.get("classMethods", []) + return self._operation_schemas + + def query(self, **kwargs) -> _utils.JsonDict: + """Runs the Reasoning Engine to serve the user query. + + This will be based on the `.query(...)` method of the python object that + was passed in when creating the Reasoning Engine. + + Args: + **kwargs: + Optional. The arguments of the `.query(...)` method. + + Returns: + dict[str, Any]: The response from serving the user query. + """ + response = self.execution_api_client.query_reasoning_engine( + request=types.QueryReasoningEngineRequest( + name=self.resource_name, + input=kwargs, + ), + ) + output = _utils.to_dict(response) + if "output" in output: + return output.get("output") + return output + + +def _prepare( + reasoning_engine: Queryable, + requirements: Sequence[str], + project: str, + location: str, + staging_bucket: str, + gcs_dir_name: str, + extra_packages: Sequence[str], + ) -> None: + """Prepares the reasoning engine for creation in Vertex AI. + + This involves packaging and uploading the artifacts to Cloud Storage. + + Args: + reasoning_engine: The reasoning engine to be prepared. + requirements (Sequence[str]): The set of PyPI dependencies needed. 
+ project (str): The project for the staging bucket. + location (str): The location for the staging bucket. + staging_bucket (str): The staging bucket name in the form "gs://...". + gcs_dir_name (str): The GCS bucket directory under `staging_bucket` to + use for staging the artifacts needed. + extra_packages (Sequence[str]): The set of extra user-provided packages. + """ + try: + from google.cloud.exceptions import NotFound + except: + NotFound = Exception + storage = _utils._import_cloud_storage_or_raise() + cloudpickle = _utils._import_cloudpickle_or_raise() + storage_client = storage.Client(project=project) + staging_bucket = staging_bucket.replace("gs://", "") + try: + gcs_bucket = storage_client.get_bucket(staging_bucket) + _LOGGER.info(f"Using bucket {staging_bucket}") + except NotFound: + new_bucket = storage_client.bucket(staging_bucket) + gcs_bucket = storage_client.create_bucket(new_bucket, location=location) + _LOGGER.info(f"Creating bucket {staging_bucket} in {location=}") + + blob = gcs_bucket.blob(os.path.join(gcs_dir_name, _BLOB_FILENAME)) + with blob.open("wb") as f: + cloudpickle.dump(reasoning_engine, f) + dir_name = f"gs://{staging_bucket}/{gcs_dir_name}" + _LOGGER.info(f"Writing to {dir_name}/{_BLOB_FILENAME}") + + blob = gcs_bucket.blob(os.path.join(gcs_dir_name, _REQUIREMENTS_FILE)) + if requirements: + blob.upload_from_string("\n".join(requirements)) + _LOGGER.info(f"Writing to {dir_name}/{_REQUIREMENTS_FILE}") + + _LOGGER.info("Creating in-memory tarfile of extra_packages") + tar_fileobj = io.BytesIO() + with tarfile.open(fileobj=tar_fileobj, mode="w|gz") as tar: + for file in extra_packages: + tar.add(file) + tar_fileobj.seek(0) + blob = gcs_bucket.blob(os.path.join(gcs_dir_name, _EXTRA_PACKAGES_FILE)) + blob.upload_from_string(tar_fileobj.read()) + _LOGGER.info(f"Writing to {dir_name}/{_EXTRA_PACKAGES_FILE}") diff --git a/vertexai/reasoning_engines/_utils.py b/vertexai/reasoning_engines/_utils.py new file mode 100644 index 
0000000000..43995289fd --- /dev/null +++ b/vertexai/reasoning_engines/_utils.py @@ -0,0 +1,227 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import inspect +import json +import types +import typing +from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Union + +import proto + +from google.protobuf import struct_pb2 +from google.protobuf import json_format + +JsonDict = Dict[str, Any] + + +def to_proto( + obj: Union[JsonDict, proto.Message], + message: proto.Message = struct_pb2.Struct(), +) -> proto.Message: + """Parses a JSON-like object into a message. + + If the object is already a message, this will return the object as-is. If + the object is a JSON Dict, this will parse and merge the object into the + message. + + Args: + obj (Union[dict[str, Any], proto.Message]): + Required. The object to convert to a proto message. + message (proto.Message): + Optional. A protocol buffer message to merge the obj into. It + defaults to Struct() if unspecified. + + Returns: + proto.Message: The same message passed as argument. + """ + if isinstance(obj, proto.Message): + return obj + try: + json_format.ParseDict(obj, message._pb) + except AttributeError: + json_format.ParseDict(obj, message) + return message + + +def to_dict(message: proto.Message) -> JsonDict: + """Converts the contents of the protobuf message to JSON format. + + Args: + message (proto.Message): + Required. 
The proto message to be converted to a JSON dictionary. + + Returns: + dict[str, Any]: A dictionary containing the contents of the proto. + """ + try: + # Best effort attempt to convert the message into a JSON dictionary. + result: JsonDict = json.loads( + json_format.MessageToJson(message._pb) + ) + except AttributeError: + result: JsonDict = json.loads(json_format.MessageToJson(message)) + return result + + +def generate_schema( + f: Callable[..., Any], + *, + schema_name: Optional[str] = None, + descriptions: Mapping[str, str] = {}, + required: Sequence[str] = [], +) -> JsonDict: + """Generates the OpenAPI Schema for a callable object. + + Only positional and keyword arguments of the function `f` will be supported + in the OpenAPI Schema that is generated. I.e. `*args` and `**kwargs` will + not be present in the OpenAPI schema returned from this function. For those + cases, you can either include it in the docstring for `f`, or modify the + OpenAPI schema returned from this function to include additional arguments. + + Args: + f (Callable): + Required. The function to generate an OpenAPI Schema for. + schema_name (str): + Optional. The name for the OpenAPI schema. If unspecified, the name + of the Callable will be used. + descriptions (Mapping[str, str]): + Optional. A `{name: description}` mapping for annotating input + arguments of the function with user-provided descriptions. It + defaults to an empty dictionary (i.e. there will not be any + description for any of the inputs). + required (Sequence[str]): + Optional. For the user to specify the set of required arguments in + function calls to `f`. If unspecified, it will be automatically + inferred from `f`. + + Returns: + dict[str, Any]: The OpenAPI Schema for the function `f` in JSON format. + """ + pydantic = _import_pydantic_or_raise() + defaults = dict(inspect.signature(f).parameters) + fields_dict = { + name: ( + # 1. 
We infer the argument type here: use Any rather than None so + # it will not try to auto-infer the type based on the default value. + ( + param.annotation if param.annotation != inspect.Parameter.empty + else Any + ), + pydantic.Field( + # 2. We do not support default values for now. + # default=( + # param.default if param.default != inspect.Parameter.empty + # else None + # ), + # 3. We support user-provided descriptions. + description=descriptions.get(name, None), + ) + ) + for name, param in defaults.items() + # We do not support *args or **kwargs + if param.kind in ( + inspect.Parameter.POSITIONAL_OR_KEYWORD, + inspect.Parameter.KEYWORD_ONLY, + inspect.Parameter.POSITIONAL_ONLY, + ) + } + parameters = pydantic.create_model(f.__name__, **fields_dict).schema() + # Postprocessing + # 4. Suppress unnecessary title generation: + # * https://github.com/pydantic/pydantic/issues/1051 + # * http://cl/586221780 + parameters.pop("title", "") + for name, function_arg in parameters.get("properties", {}).items(): + function_arg.pop("title", "") + annotation = defaults[name].annotation + # 5. Nullable fields: + # * https://github.com/pydantic/pydantic/issues/1270 + # * https://stackoverflow.com/a/58841311 + # * https://github.com/pydantic/pydantic/discussions/4872 + if ( + typing.get_origin(annotation) is typing.Union + and type(None) in typing.get_args(annotation) + ): + # for "typing.Optional" arguments, function_arg might be a + # dictionary like + # + # {'anyOf': [{'type': 'integer'}, {'type': 'null'}] + for schema in function_arg.pop("anyOf", []): + schema_type = schema.get("type") + if schema_type and schema_type != "null": + function_arg["type"] = schema_type + break + function_arg["nullable"] = True + # 6. Annotate required fields. + if required: + # We use the user-provided "required" fields if specified. + parameters["required"] = required + else: + # Otherwise we infer it from the function signature. 
+ parameters["required"] = [ + k for k in defaults if ( + defaults[k].default == inspect.Parameter.empty + and defaults[k].kind in ( + inspect.Parameter.POSITIONAL_OR_KEYWORD, + inspect.Parameter.KEYWORD_ONLY, + inspect.Parameter.POSITIONAL_ONLY, + ) + ) + ] + schema = dict(name=f.__name__, description=f.__doc__, parameters=parameters) + if schema_name: + schema["name"] = schema_name + return schema + + +def _import_cloud_storage_or_raise() -> types.ModuleType: + """Tries to import the Cloud Storage module.""" + try: + from google.cloud import storage + except ImportError as e: + raise ImportError( + "Cloud Storage is not installed. Please call " + "'pip install google-cloud-aiplatform[reasoningengine]'." + ) from e + return storage + + +def _import_cloudpickle_or_raise() -> types.ModuleType: + """Tries to import the cloudpickle module.""" + try: + import cloudpickle # noqa:F401 + except ImportError as e: + raise ImportError( + "cloudpickle is not installed. Please call " + "'pip install google-cloud-aiplatform[reasoningengine]'." + ) from e + return cloudpickle + + +def _import_pydantic_or_raise() -> types.ModuleType: + """Tries to import the pydantic module.""" + try: + # For compatibility across Pydantic V1 and V2 + try: + from pydantic import v1 as pydantic + except ImportError: + import pydantic + except ImportError as e: + raise ImportError( + "pydantic is not installed. Please call " + "'pip install google-cloud-aiplatform[reasoningengine]'." 
+ ) from e + return pydantic diff --git a/vertexai/resources/preview/__init__.py b/vertexai/resources/preview/__init__.py index c23b8a11f4..b4f8a3842d 100644 --- a/vertexai/resources/preview/__init__.py +++ b/vertexai/resources/preview/__init__.py @@ -26,9 +26,6 @@ Endpoint, Model, ) -from google.cloud.aiplatform.preview.persistent_resource import ( - PersistentResource, -) from google.cloud.aiplatform.preview.featurestore.entity_type import ( EntityType, ) diff --git a/vertexai/tuning/_supervised_tuning.py b/vertexai/tuning/_supervised_tuning.py new file mode 100644 index 0000000000..047b53cfa3 --- /dev/null +++ b/vertexai/tuning/_supervised_tuning.py @@ -0,0 +1,71 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from typing import Optional, Union + +from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job_types + +from vertexai import generative_models +from vertexai.tuning import _tuning + + +def train( + *, + source_model: Union[str, generative_models.GenerativeModel], + train_dataset: str, + validation_dataset: Optional[str] = None, + tuned_model_display_name: Optional[str] = None, + epochs: Optional[int] = None, + learning_rate_multiplier: Optional[float] = None, +) -> "SupervisedTuningJob": + """Tunes a model using supervised training. + + Args: + source_model (str): + Model name for tuning, e.g., "gemini-1.0-pro-002". + train_dataset: Cloud Storage path to file containing training dataset for tuning. 
+ The dataset should be in JSONL format. + validation_dataset: Cloud Storage path to file containing validation dataset for tuning. + The dataset should be in JSONL format. + tuned_model_display_name: The display name of the + [TunedModel][google.cloud.aiplatform.v1.Model]. The name can + be up to 128 characters long and can consist of any UTF-8 characters. + epochs: Number of training epochs for this tuning job. + learning_rate_multiplier: Learning rate multiplier for tuning. + + Returns: + A `TuningJob` object. + """ + supervised_tuning_spec = gca_tuning_job_types.SupervisedTuningSpec( + training_dataset_uri=train_dataset, + validation_dataset_uri=validation_dataset, + hyper_parameters=gca_tuning_job_types.SupervisedHyperParameters( + epoch_count=epochs, + learning_rate_multiplier=learning_rate_multiplier, + ), + ) + + if isinstance(source_model, generative_models.GenerativeModel): + source_model = source_model._prediction_resource_name.rpartition('/')[-1] + + return SupervisedTuningJob._create( # pylint: disable=protected-access + base_model=source_model, + tuning_spec=supervised_tuning_spec, + tuned_model_display_name=tuned_model_display_name, + ) + + +class SupervisedTuningJob(_tuning.TuningJob): + pass diff --git a/vertexai/tuning/_tuning.py b/vertexai/tuning/_tuning.py new file mode 100644 index 0000000000..a3c43fca1e --- /dev/null +++ b/vertexai/tuning/_tuning.py @@ -0,0 +1,246 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# pylint: disable=protected-access +"""Classes to support Tuning.""" + +import typing +from typing import Dict, List, Optional, Union, Sequence + +from google.auth import credentials as auth_credentials + +from google.cloud import aiplatform +from google.cloud.aiplatform import base as aiplatform_base +from google.cloud.aiplatform import compat +from google.cloud.aiplatform import initializer as aiplatform_initializer +from google.cloud.aiplatform import jobs +from google.cloud.aiplatform import utils as aiplatform_utils +from google.cloud.aiplatform.utils import _ipython_utils +from google.cloud.aiplatform_v1.services import gen_ai_tuning_service as gen_ai_tuning_service_v1 +from google.cloud.aiplatform_v1.types import tuning_job as gca_tuning_job_types +from google.cloud.aiplatform_v1 import types as gca_types + +from google.rpc import status_pb2 # type: ignore + + +_LOGGER = aiplatform_base.Logger(__name__) + + +class TuningJobClientWithOverride(aiplatform_utils.ClientWithOverride): + _is_temporary = True + _default_version = compat.V1 + _version_map = ( + (compat.V1, gen_ai_tuning_service_v1.client.GenAiTuningServiceClient), + # v1beta1 version does not exist + # (compat.V1BETA1, gen_ai_tuning_service_v1beta1.client.GenAiTuningServiceClient), + ) + + +class TuningJob(aiplatform_base._VertexAiResourceNounPlus): + """Represents a TuningJob that runs with Google owned models.""" + + _resource_noun = "tuningJobs" + _getter_method = "get_tuning_job" + _list_method = "list_tuning_jobs" + _cancel_method = "cancel_tuning_job" + _delete_method = "delete_tuning_job" + _parse_resource_name_method = "parse_tuning_job_path" + _format_resource_name_method = "tuning_job_path" + _job_type = "tuning/tuningJob" + _has_displayed_experiments_button = False + + client_class = TuningJobClientWithOverride + + _gca_resource: gca_tuning_job_types.TuningJob + api_client: gen_ai_tuning_service_v1.client.GenAiTuningServiceClient + + def __init__(self, tuning_job_name: str): + 
super().__init__(resource_name=tuning_job_name) + self._gca_resource: gca_tuning_job_types.TuningJob = ( + self._get_gca_resource(resource_name=tuning_job_name) + ) + + def refresh(self) -> "TuningJob": + """Refreshed the tuning job from the service.""" + self._gca_resource: gca_tuning_job_types.TuningJob = ( + self._get_gca_resource(resource_name=self.resource_name) + ) + if self.experiment and not self._has_displayed_experiments_button: + self._has_displayed_experiments_button = True + _ipython_utils.display_experiment_button(self.experiment) + return self + + @property + def tuned_model_name(self) -> Optional[str]: + return self._gca_resource.tuned_model.model + + @property + def tuned_model_endpoint_name(self) -> Optional[str]: + return self._gca_resource.tuned_model.endpoint + + @property + def _experiment_name(self) -> Optional[str]: + return self._gca_resource.experiment + + @property + def experiment(self) -> Optional[aiplatform.Experiment]: + if self._experiment_name: + return aiplatform.Experiment(experiment_name=self._experiment_name) + + @property + def state(self) -> gca_types.JobState: + return self._gca_resource.state + + @property + def has_ended(self): + return self.state in jobs._JOB_COMPLETE_STATES + + @property + def has_succeeded(self): + return self.state == gca_types.JobState.JOB_STATE_SUCCEEDED + + @property + def error(self) -> Optional[status_pb2.Status]: + return self._gca_resource.error + + @property + def tuning_data_statistics(self) -> gca_tuning_job_types.TuningDataStats: + return self._gca_resource.tuning_data_stats + + @classmethod + def _create( + cls, + *, + base_model: str, + tuning_spec: Union[gca_tuning_job_types.SupervisedTuningSpec], + tuned_model_display_name: Optional[str] = None, + description: Optional[str] = None, + labels: Optional[Dict[str, str]] = None, + project: Optional[str] = None, + location: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, + ) -> "TuningJob": + r"""Submits 
TuningJob. + + Args: + base_model (str): + Model name for tuning, e.g., "gemini-1.0-pro" + or "gemini-1.0-pro-001". + + This field is a member of `oneof`_ ``source_model``. + tuning_spec: Tuning Spec for Fine Tuning. + Supported types: SupervisedTuningSpec. + tuned_model_display_name: The display name of the + [TunedModel][google.cloud.aiplatform.v1.Model]. The name can + be up to 128 characters long and can consist of any UTF-8 + characters. + description: The description of the `TuningJob`. + labels: The labels with user-defined metadata to organize + [TuningJob][google.cloud.aiplatform.v1.TuningJob] and + generated resources such as + [Model][google.cloud.aiplatform.v1.Model] and + [Endpoint][google.cloud.aiplatform.v1.Endpoint]. + + Label keys and values can be no longer than 64 characters + (Unicode codepoints), can only contain lowercase letters, + numeric characters, underscores and dashes. International + characters are allowed. + + See https://goo.gl/xmQnxf for more information and examples + of labels. + project: Project to run the tuning job in. + Overrides project set in aiplatform.init. + location: Location to run the tuning job in. + Overrides location set in aiplatform.init. + credentials: Custom credentials to use to call tuning job service. + Overrides credentials set in aiplatform.init. + + Returns: + Submitted TuningJob. 
+ + Raises: + RuntimeError if tuning_spec kind is unsupported + """ + _LOGGER.log_create_with_lro(cls) + + if not tuned_model_display_name: + tuned_model_display_name = cls._generate_display_name() + + gca_tuning_job = gca_tuning_job_types.TuningJob( + base_model=base_model, + tuned_model_display_name=tuned_model_display_name, + description=description, + labels=labels, + # The tuning_spec one_of is set later + ) + + if isinstance(tuning_spec, gca_tuning_job_types.SupervisedTuningSpec): + gca_tuning_job.supervised_tuning_spec = tuning_spec + else: + raise RuntimeError(f"Unsupported tuning_spec kind: {tuning_spec}") + + tuning_job: TuningJob = cls._construct_sdk_resource_from_gapic( + gapic_resource=gca_tuning_job, + project=project, + location=location, + credentials=credentials, + ) + + parent = aiplatform_initializer.global_config.common_location_path( + project=project, location=location + ) + + created_gca_tuning_job = tuning_job.api_client.create_tuning_job( + parent=parent, + tuning_job=gca_tuning_job, + ) + tuning_job._gca_resource = created_gca_tuning_job + + _LOGGER.log_create_complete( + cls=cls, + resource=created_gca_tuning_job, + variable_name="tuning_job", + module_name="sft", + ) + _LOGGER.info(f"View Tuning Job:\n{tuning_job._dashboard_url()}") + if tuning_job._experiment_name: + _LOGGER.info( + f"View experiment:\n{tuning_job._experiment.dashboard_url}" + ) + + return tuning_job + + def cancel(self): + self.api_client.cancel_tuning_job(name=self.resource_name) + + @classmethod + def list(cls, filter: Optional[str] = None) -> List["TuningJob"]: + """Lists TuningJobs. + + Args: + filter: The standard list filter. + + Returns: + A list of TuningJob objects. 
+ """ + return cls._list(filter=filter) + + def _dashboard_url(self) -> str: + """Returns the Google Cloud console URL where job can be viewed.""" + fields = self._parse_resource_name(self.resource_name) + location = fields.pop("location") + project = fields.pop("project") + job = list(fields.values())[0] + url = f"https://console.cloud.google.com/vertex-ai/generative/language/locations/{location}/tuning/tuningJob/{job}?project={project}" + return url diff --git a/vertexai/vision_models/_vision_models.py b/vertexai/vision_models/_vision_models.py index e231efe83a..12c973acb0 100644 --- a/vertexai/vision_models/_vision_models.py +++ b/vertexai/vision_models/_vision_models.py @@ -319,7 +319,10 @@ def _generate_images( mask: Optional["Image"] = None, edit_mode: Optional[ Literal[ - "inpainting-insert", "inpainting-remove", "outpainting", "product-image" + "inpainting-insert", + "inpainting-remove", + "outpainting", + "product-image", ] ] = None, mask_mode: Optional[Literal["background", "foreground", "semantic"]] = None, @@ -330,7 +333,7 @@ def _generate_images( compression_quality: Optional[float] = None, language: Optional[str] = None, output_gcs_uri: Optional[str] = None, - add_watermark: Optional[bool] = False, + add_watermark: Optional[bool] = None, safety_filter_level: Optional[ Literal["block_most", "block_some", "block_few", "block_fewest"] ] = None, @@ -354,62 +357,53 @@ def _generate_images( * 4:3 - Landscape, desktop ratio. * 3:4 - Portrait, desktop ratio guidance_scale: Controls the strength of the prompt. Suggested values - are - - * 0-9 (low strength) - * 10-20 (medium strength) - * 21+ (high strength) + are - * 0-9 (low strength) * 10-20 (medium strength) * 21+ (high + strength) seed: Image generation random seed. base_image: Base image to use for the image generation. mask: Mask for the base image. edit_mode: Describes the editing mode for the request. 
Supported values - are - - * inpainting-insert: fills the mask area based on the text prompt - (requires mask and text) - * inpainting-remove: removes the object(s) in the mask area. - (requires mask) - * outpainting: extend the image based on the mask area. - (Requires mask) - * product-image: Changes the background for the predominant product - or subject in the image + are - * inpainting-insert: fills the mask area based on the text + prompt (requires mask and text) * inpainting-remove: removes the + object(s) in the mask area. (requires mask) + * outpainting: extend the image based on the mask area. (Requires + mask) * product-image: Changes the background for the predominant + product or subject in the image mask_mode: Solicits generation of the mask (v/s providing mask as an - input). Supported values are: + input). Supported values are: * background: Automatically generates a mask for all regions except - the primary subject(s) of the image + the primary subject(s) of the image * foreground: Automatically generates a mask for the primary - subjects(s) of the image. + subjects(s) of the image. * semantic: Segment one or more of the segmentation classes using - class ID + class ID segmentation_classes: List of class IDs for segmentation. Max of 5 IDs mask_dilation: Defines the dilation percentage of the mask provided. - Float between 0 and 1. Defaults to 0.03 + Float between 0 and 1. Defaults to 0.03 product_position: Defines whether the product should stay fixed or be - repositioned. Supported Values: + repositioned. Supported Values: * fixed: Fixed position * reposition: Can be moved (default) output_mime_type: Which image format should the output be saved as. - Supported values: - * image/png: Save as a PNG image - * image/jpeg: Save as a JPEG image + Supported values: * image/png: Save as a PNG image * image/jpeg: Save + as a JPEG image compression_quality: Level of compression if the output mime type is - selected to be image/jpeg. 
Float between 0 to 100 + selected to be image/jpeg. Float between 0 to 100 language: Language of the text prompt for the image. Default: None. - Supported values are `"en"` for English, `"hi"` for Hindi, `"ja"` - for Japanese, `"ko"` for Korean, and `"auto"` for automatic language - detection. + Supported values are `"en"` for English, `"hi"` for Hindi, `"ja"` for + Japanese, `"ko"` for Korean, and `"auto"` for automatic language + detection. output_gcs_uri: Google Cloud Storage uri to store the generated images. add_watermark: Add a watermark to the generated image safety_filter_level: Adds a filter level to Safety filtering. Supported - values are: - * "block_most" : Strongest filtering level, most strict - blocking - * "block_some" : Block some problematic prompts and responses - * "block_few" : Block fewer problematic prompts and responses - * "block_fewest" : Block very few problematic prompts and responses + values are: * "block_most" : Strongest filtering level, most strict + blocking * "block_some" : Block some problematic prompts and responses + * "block_few" : Block fewer problematic prompts and responses * + "block_fewest" : Block very few problematic prompts and responses person_generation: Allow generation of people by the model Supported - values are: - * "dont_allow" : Block generation of people - * "allow_adult" : Generate adults, but not children - * "allow_all" : Generate adults and children + values are: * "dont_allow" : Block generation of people * + "allow_adult" : Generate adults, but not children * "allow_all" : + Generate adults and children Returns: An `ImageGenerationResponse` object. 
@@ -566,7 +560,7 @@ def generate_images( language: Optional[str] = None, seed: Optional[int] = None, output_gcs_uri: Optional[str] = None, - add_watermark: Optional[bool] = False, + add_watermark: Optional[bool] = True, safety_filter_level: Optional[ Literal["block_most", "block_some", "block_few", "block_fewest"] ] = None, @@ -1240,7 +1234,7 @@ class ImageTextModel(ImageCaptioningModel, ImageQnAModel): @dataclasses.dataclass class WatermarkVerificationResponse: - __module__ = "vertex.preview.vision_models" + __module__ = "vertexai.preview.vision_models" _prediction_response: Any watermark_verification_result: Optional[str] = None @@ -1251,7 +1245,7 @@ class WatermarkVerificationModel(_model_garden_models._ModelGardenModel): __module__ = "vertexai.preview.vision_models" - _INSTANCE_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/predict/instance/watermark_watermark_verification_model_1.0.0.yaml" + _INSTANCE_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/predict/instance/watermark_verification_model_1.0.0.yaml" def verify_image(self, image: Image) -> WatermarkVerificationResponse: """Verifies the watermark of an image.